{"blob_id": "b0d3940833fa3b5e9c6c6608f381109890bbd320", "bodies": ["cnfg = ConfigurationModel.find()\nif cnfg:\n return cnfg.tojson()\nreturn ({'message': 'Client settings not found'}, 404)", "data = resources.parsers.ParseConfiguration.parser.parse_args()\ncnfg = ConfigurationModel.find()\nif cnfg is None:\n return ({'message': 'No configuration available'}, 400)\nif not check_fpq(data['global_f'], data['global_p'], data['global_q']):\n return ({'message': 'Global f,p and q must have values between 0.0 and 1.0'}, 400)\ncnfg.global_f = data['global_f']\ncnfg.global_p = data['global_p']\ncnfg.global_q = data['global_q']\ncnfg.dsgvo = data['dsgvo']\ncnfg.quizmode = data['quizmode']\ntry:\n cnfg.save_to_db()\nexcept:\n return ({'message': 'Error while setting configuration data.'}, 500)\nreturn (cnfg.tojson(), 201)"], "bodies_text": "<|body_start_0|>\n cnfg = ConfigurationModel.find()\n if cnfg:\n return cnfg.tojson()\n return ({'message': 'Client settings not found'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n data = resources.parsers.ParseConfiguration.parser.parse_args()\n cnfg = ConfigurationModel.find()\n if cnfg is None:\n return ({'message': 'No configuration available'}, 400)\n if not check_fpq(data['global_f'], data['global_p'], data['global_q']):\n return ({'message': 'Global f,p and q must have values between 0.0 and 1.0'}, 400)\n cnfg.global_f = data['global_f']\n cnfg.global_p = data['global_p']\n cnfg.global_q = data['global_q']\n cnfg.dsgvo = data['dsgvo']\n cnfg.quizmode = data['quizmode']\n try:\n cnfg.save_to_db()\n except:\n return ({'message': 'Error while setting configuration data.'}, 500)\n return (cnfg.tojson(), 201)\n<|end_body_1|>\n", "class_docstring": "REST API for client configuration.", "class_name": "Configuration", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Configuration:\n \"\"\"REST API for client configuration.\"\"\"\n\n def get(self):\n \"\"\"Returns the configuration in JSON format.\"\"\"\n <|body_0|>\n\n def put(self):\n \"\"\"Allows to change the configuration. There is no post request, because the configuration is treated as singleton.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnfg = ConfigurationModel.find()\n if cnfg:\n return cnfg.tojson()\n return ({'message': 'Client settings not found'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n data = resources.parsers.ParseConfiguration.parser.parse_args()\n cnfg = ConfigurationModel.find()\n if cnfg is None:\n return ({'message': 'No configuration available'}, 400)\n if not check_fpq(data['global_f'], data['global_p'], data['global_q']):\n return ({'message': 'Global f,p and q must have values between 0.0 and 1.0'}, 400)\n cnfg.global_f = data['global_f']\n cnfg.global_p = data['global_p']\n cnfg.global_q = data['global_q']\n cnfg.dsgvo = data['dsgvo']\n cnfg.quizmode = data['quizmode']\n try:\n cnfg.save_to_db()\n except:\n return ({'message': 'Error while setting configuration data.'}, 500)\n return (cnfg.tojson(), 201)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000000", "length_bytes": 1460, "license_type": "no_license", "methods": [{"docstring": "Returns the configuration in JSON format.", "name": "get", "signature": "def get(self)"}, {"docstring": "Allows to change the configuration. There is no post request, because the configuration is treated as singleton.", "name": "put", "signature": "def put(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_015906", "prompt": "Implement the Python class `Configuration` described below.\n\nClass description:\nREST API for client configuration.\n\nMethod signatures and docstrings:\n- def get(self): Returns the configuration in JSON format.\n- def put(self): Allows to change the configuration. There is no post request, because the configuration is treated as singleton.", "prompted_full_text": "Implement the Python class `Configuration` described below.\n\nClass description:\nREST API for client configuration.\n\nMethod signatures and docstrings:\n- def get(self): Returns the configuration in JSON format.\n- def put(self): Allows to change the configuration. There is no post request, because the configuration is treated as singleton.\n\n<|skeleton|>\nclass Configuration:\n \"\"\"REST API for client configuration.\"\"\"\n\n def get(self):\n \"\"\"Returns the configuration in JSON format.\"\"\"\n <|body_0|>\n\n def put(self):\n \"\"\"Allows to change the configuration. There is no post request, because the configuration is treated as singleton.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnfg = ConfigurationModel.find()\n if cnfg:\n return cnfg.tojson()\n return ({'message': 'Client settings not found'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n data = resources.parsers.ParseConfiguration.parser.parse_args()\n cnfg = ConfigurationModel.find()\n if cnfg is None:\n return ({'message': 'No configuration available'}, 400)\n if not check_fpq(data['global_f'], data['global_p'], data['global_q']):\n return ({'message': 'Global f,p and q must have values between 0.0 and 1.0'}, 400)\n cnfg.global_f = data['global_f']\n cnfg.global_p = data['global_p']\n cnfg.global_q = data['global_q']\n cnfg.dsgvo = data['dsgvo']\n cnfg.quizmode = data['quizmode']\n try:\n cnfg.save_to_db()\n except:\n return ({'message': 'Error while setting configuration data.'}, 500)\n return (cnfg.tojson(), 201)\n<|end_body_1|>\n", "revision_id": "619a7040ab339097b19cf5daccf94c58ee4e2870", "skeleton": "<|skeleton|>\nclass Configuration:\n \"\"\"REST API for client configuration.\"\"\"\n\n def get(self):\n \"\"\"Returns the configuration in JSON format.\"\"\"\n <|body_0|>\n\n def put(self):\n \"\"\"Allows to change the configuration. There is no post request, because the configuration is treated as singleton.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Configuration:\n \"\"\"REST API for client configuration.\"\"\"\n\n def get(self):\n \"\"\"Returns the configuration in JSON format.\"\"\"\n cnfg = ConfigurationModel.find()\n if cnfg:\n return cnfg.tojson()\n return ({'message': 'Client settings not found'}, 404)\n\n def put(self):\n \"\"\"Allows to change the configuration. There is no post request, because the configuration is treated as singleton.\"\"\"\n data = resources.parsers.ParseConfiguration.parser.parse_args()\n cnfg = ConfigurationModel.find()\n if cnfg is None:\n return ({'message': 'No configuration available'}, 400)\n if not check_fpq(data['global_f'], data['global_p'], data['global_q']):\n return ({'message': 'Global f,p and q must have values between 0.0 and 1.0'}, 400)\n cnfg.global_f = data['global_f']\n cnfg.global_p = data['global_p']\n cnfg.global_q = data['global_q']\n cnfg.dsgvo = data['dsgvo']\n cnfg.quizmode = data['quizmode']\n try:\n cnfg.save_to_db()\n except:\n return ({'message': 'Error while setting configuration data.'}, 500)\n return (cnfg.tojson(), 201)\n", "source": "the_stack_v2_python_sparse", "source_path": "client/resources/config.py", "source_repo": "yrtsprmb/zappor", "split": "test", "star_events_count": 0} {"blob_id": "228d6b37a7b521dd03e19d21ca25cd49b0738e98", "bodies": ["if target == trackSum:\n self.res.append(track[:])\n return\ni = 0\nfor key, val in dic.items():\n if i < k or val <= 0 or trackSum + key > target:\n i += 1\n continue\n track.append(key)\n trackSum += key\n dic[key] -= 1\n if dic[key] == 0:\n self.backtrack(dic, i + 1, track, trackSum, target)\n else:\n self.backtrack(dic, i, track, trackSum, target)\n track.pop()\n trackSum -= key\n dic[key] += 1\n i += 1", "self.res = []\nif len(candidates) == 0:\n return self.res\ndic = {}\nfor num in candidates:\n if num in dic:\n dic[num] += 1\n else:\n dic[num] = 1\ntrack, trackSum = ([], 0)\nself.backtrack(dic, 0, track, trackSum, target)\nreturn self.res"], "bodies_text": "<|body_start_0|>\n if target == trackSum:\n self.res.append(track[:])\n return\n i = 0\n for key, val in dic.items():\n if i < k or val <= 0 or trackSum + key > target:\n i += 1\n continue\n track.append(key)\n trackSum += key\n dic[key] -= 1\n if dic[key] == 0:\n self.backtrack(dic, i + 1, track, trackSum, target)\n else:\n self.backtrack(dic, i, track, trackSum, target)\n track.pop()\n trackSum -= key\n dic[key] += 1\n i += 1\n<|end_body_0|>\n\n<|body_start_1|>\n self.res = []\n if len(candidates) == 0:\n return self.res\n dic = {}\n for num in candidates:\n if num in dic:\n dic[num] += 1\n else:\n dic[num] = 1\n track, trackSum = ([], 0)\n self.backtrack(dic, 0, track, trackSum, target)\n return self.res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def backtrack(self, dic, k, track, trackSum, target):\n \"\"\":type dic: dict[int:int] :type k: int :type track: List[int] :type trackSum: int :type target: int\"\"\"\n <|body_0|>\n\n def combinationSum2(self, candidates, target):\n \"\"\":type candidates: List[int] :type target: int :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if target == trackSum:\n self.res.append(track[:])\n return\n i = 0\n for key, val in dic.items():\n if i < k or val <= 0 or trackSum + key > target:\n i += 1\n continue\n track.append(key)\n trackSum += key\n dic[key] -= 1\n if dic[key] == 0:\n self.backtrack(dic, i + 1, track, trackSum, target)\n else:\n self.backtrack(dic, i, track, trackSum, target)\n track.pop()\n trackSum -= key\n dic[key] += 1\n i += 1\n<|end_body_0|>\n\n<|body_start_1|>\n self.res = []\n if len(candidates) == 0:\n return self.res\n dic = {}\n for num in candidates:\n if num in dic:\n dic[num] += 1\n else:\n dic[num] = 1\n track, trackSum = ([], 0)\n self.backtrack(dic, 0, track, trackSum, target)\n return self.res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000001", "length_bytes": 2061, "license_type": "no_license", "methods": [{"docstring": ":type dic: dict[int:int] :type k: int :type track: List[int] :type trackSum: int :type target: int", "name": "backtrack", "signature": "def backtrack(self, dic, k, track, trackSum, target)"}, {"docstring": ":type candidates: List[int] :type target: int :rtype: List[List[int]]", "name": "combinationSum2", "signature": "def combinationSum2(self, candidates, target)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032662", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def backtrack(self, dic, k, track, trackSum, target): :type dic: dict[int:int] :type k: int :type track: List[int] :type trackSum: int :type target: int\n- def combinationSum2(self, candidates, target): :type candidates: List[int] :type target: int :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def backtrack(self, dic, k, track, trackSum, target): :type dic: dict[int:int] :type k: int :type track: List[int] :type trackSum: int :type target: int\n- def combinationSum2(self, candidates, target): :type candidates: List[int] :type target: int :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def backtrack(self, dic, k, track, trackSum, target):\n \"\"\":type dic: dict[int:int] :type k: int :type track: List[int] :type trackSum: int :type target: int\"\"\"\n <|body_0|>\n\n def combinationSum2(self, candidates, target):\n \"\"\":type candidates: List[int] :type target: int :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if target == trackSum:\n self.res.append(track[:])\n return\n i = 0\n for key, val in dic.items():\n if i < k or val <= 0 or trackSum + key > target:\n i += 1\n continue\n track.append(key)\n trackSum += key\n dic[key] -= 1\n if dic[key] == 0:\n self.backtrack(dic, i + 1, track, trackSum, target)\n else:\n self.backtrack(dic, i, track, trackSum, target)\n track.pop()\n trackSum -= key\n dic[key] += 1\n i += 1\n<|end_body_0|>\n\n<|body_start_1|>\n self.res = []\n if len(candidates) == 0:\n return self.res\n dic = {}\n for num in candidates:\n if num in dic:\n dic[num] += 1\n else:\n dic[num] = 1\n track, trackSum = ([], 0)\n self.backtrack(dic, 0, track, trackSum, target)\n return self.res\n<|end_body_1|>\n", "revision_id": "e82305a822ea200086a0978a29c18ab65a3b18fb", "skeleton": "<|skeleton|>\nclass Solution:\n\n def backtrack(self, dic, k, track, trackSum, target):\n \"\"\":type dic: dict[int:int] :type k: int :type track: List[int] :type trackSum: int :type target: int\"\"\"\n <|body_0|>\n\n def combinationSum2(self, candidates, target):\n \"\"\":type candidates: List[int] :type target: int :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def backtrack(self, dic, k, track, trackSum, target):\n \"\"\":type dic: dict[int:int] :type k: int :type track: List[int] :type trackSum: int :type target: int\"\"\"\n if target == trackSum:\n self.res.append(track[:])\n return\n i = 0\n for key, val in dic.items():\n if i < k or val <= 0 or trackSum + key > target:\n i += 1\n continue\n track.append(key)\n trackSum += key\n dic[key] -= 1\n if dic[key] == 0:\n self.backtrack(dic, i + 1, track, trackSum, target)\n else:\n self.backtrack(dic, i, track, trackSum, target)\n track.pop()\n trackSum -= key\n dic[key] += 1\n i += 1\n\n def combinationSum2(self, candidates, target):\n \"\"\":type candidates: List[int] :type target: int :rtype: List[List[int]]\"\"\"\n self.res = []\n if len(candidates) == 0:\n return self.res\n dic = {}\n for num in candidates:\n if num in dic:\n dic[num] += 1\n else:\n dic[num] = 1\n track, trackSum = ([], 0)\n self.backtrack(dic, 0, track, trackSum, target)\n return self.res\n", "source": "the_stack_v2_python_sparse", "source_path": "Combination Sum II.py", "source_repo": "ramchinta/python", "split": "test", "star_events_count": 0} {"blob_id": "65e8abc6e573cc4c58ca1d1350c6cf18a8fa8b63", "bodies": ["self.prediction_fn = prediction_fn\nself.ngram_order = ngram_order\nif name is None:\n name = f'Distinct_{ngram_order}grams'\nsuper().__init__(name=name)", "if self.prediction_fn is not None:\n y_pred = self.prediction_fn(y_pred)\ntokenized_y_pred = [nltk.tokenize.word_tokenize(y) for y in y_pred]\nngrams = list(chain(*[[gram for gram in nltk.ngrams(y, self.ngram_order)] for y in tokenized_y_pred]))\nscore = len(set(ngrams)) / len(ngrams)\nreturn torch.tensor(score, dtype=torch.float)"], "bodies_text": "<|body_start_0|>\n self.prediction_fn = prediction_fn\n self.ngram_order = ngram_order\n if name is None:\n name = f'Distinct_{ngram_order}grams'\n super().__init__(name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.prediction_fn is not None:\n y_pred = self.prediction_fn(y_pred)\n tokenized_y_pred = [nltk.tokenize.word_tokenize(y) for y in y_pred]\n ngrams = list(chain(*[[gram for gram in nltk.ngrams(y, self.ngram_order)] for y in tokenized_y_pred]))\n score = len(set(ngrams)) / len(ngrams)\n return torch.tensor(score, dtype=torch.float)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Distinct_Ngrams", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Distinct_Ngrams:\n\n def __init__(self, prediction_fn=None, name=None, ngram_order=1):\n \"\"\"Calculate distinct n-grams with a nltk word tokenizer Args: - prediction_fn: Function to convert y_pred into the same format as y_true (for example, convert logits to max index) - name (str): Name of the metric\"\"\"\n <|body_0|>\n\n def _compute(self, y_pred, y_true):\n \"\"\"Args: - y_pred (List of str): Predicted labels - y_true (List of str): Ground truth labels\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prediction_fn = prediction_fn\n self.ngram_order = ngram_order\n if name is None:\n name = f'Distinct_{ngram_order}grams'\n super().__init__(name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.prediction_fn is not None:\n y_pred = self.prediction_fn(y_pred)\n tokenized_y_pred = [nltk.tokenize.word_tokenize(y) for y in y_pred]\n ngrams = list(chain(*[[gram for gram in nltk.ngrams(y, self.ngram_order)] for y in tokenized_y_pred]))\n score = len(set(ngrams)) / len(ngrams)\n return torch.tensor(score, dtype=torch.float)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000002", "length_bytes": 20436, "license_type": "permissive", "methods": [{"docstring": "Calculate distinct n-grams with a nltk word tokenizer Args: - prediction_fn: Function to convert y_pred into the same format as y_true (for example, convert logits to max index) - name (str): Name of the metric", "name": "__init__", "signature": "def __init__(self, prediction_fn=None, name=None, ngram_order=1)"}, {"docstring": "Args: - y_pred (List of str): Predicted labels - y_true (List of str): Ground truth labels", "name": "_compute", "signature": "def _compute(self, y_pred, y_true)"}], "n_methods": 2, "prompt": "Implement the Python class `Distinct_Ngrams` described below.\n\nClass description:\nImplement the Distinct_Ngrams class.\n\nMethod signatures and docstrings:\n- def __init__(self, prediction_fn=None, name=None, ngram_order=1): Calculate distinct n-grams with a nltk word tokenizer Args: - prediction_fn: Function to convert y_pred into the same format as y_true (for example, convert logits to max index) - name (str): Name of the metric\n- def _compute(self, y_pred, y_true): Args: - y_pred (List of str): Predicted labels - y_true (List of str): Ground truth labels", "prompted_full_text": "Implement the Python class `Distinct_Ngrams` described below.\n\nClass description:\nImplement the Distinct_Ngrams class.\n\nMethod signatures and docstrings:\n- def __init__(self, prediction_fn=None, name=None, ngram_order=1): Calculate distinct n-grams with a nltk word tokenizer Args: - prediction_fn: Function to convert y_pred into the same format as y_true (for example, convert logits to max index) - name (str): Name of the metric\n- def _compute(self, y_pred, y_true): Args: - y_pred (List of str): Predicted labels - y_true (List of str): Ground truth labels\n\n<|skeleton|>\nclass Distinct_Ngrams:\n\n def __init__(self, prediction_fn=None, name=None, ngram_order=1):\n \"\"\"Calculate distinct n-grams with a nltk word tokenizer Args: - prediction_fn: Function to convert y_pred into the same format as y_true (for example, convert logits to max index) - name (str): Name of the metric\"\"\"\n <|body_0|>\n\n def _compute(self, y_pred, y_true):\n \"\"\"Args: - y_pred (List of str): Predicted labels - y_true (List of str): Ground truth labels\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.prediction_fn = prediction_fn\n self.ngram_order = ngram_order\n if name is None:\n name = f'Distinct_{ngram_order}grams'\n super().__init__(name=name)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.prediction_fn is not None:\n y_pred = self.prediction_fn(y_pred)\n tokenized_y_pred = [nltk.tokenize.word_tokenize(y) for y in y_pred]\n ngrams = list(chain(*[[gram for gram in nltk.ngrams(y, self.ngram_order)] for y in tokenized_y_pred]))\n score = len(set(ngrams)) / len(ngrams)\n return torch.tensor(score, dtype=torch.float)\n<|end_body_1|>\n", "revision_id": "be1c9634252a336db1d61f0cee20cc0fd5ccd5a9", "skeleton": "<|skeleton|>\nclass Distinct_Ngrams:\n\n def __init__(self, prediction_fn=None, name=None, ngram_order=1):\n \"\"\"Calculate distinct n-grams with a nltk word tokenizer Args: - prediction_fn: Function to convert y_pred into the same format as y_true (for example, convert logits to max index) - name (str): Name of the metric\"\"\"\n <|body_0|>\n\n def _compute(self, y_pred, y_true):\n \"\"\"Args: - y_pred (List of str): Predicted labels - y_true (List of str): Ground truth labels\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Distinct_Ngrams:\n def __init__(self, prediction_fn=None, name=None, ngram_order=1):\n \"\"\"Calculate distinct n-grams with a nltk word tokenizer Args: - prediction_fn: Function to convert y_pred into the same format as y_true (for example, convert logits to max index) - name (str): Name of the metric\"\"\"\n self.prediction_fn = prediction_fn\n self.ngram_order = ngram_order\n if name is None:\n name = f'Distinct_{ngram_order}grams'\n super().__init__(name=name)\n\n def _compute(self, y_pred, y_true):\n \"\"\"Args: - y_pred (List of str): Predicted labels - y_true (List of str): Ground truth labels\"\"\"\n if self.prediction_fn is not None:\n y_pred = self.prediction_fn(y_pred)\n tokenized_y_pred = [nltk.tokenize.word_tokenize(y) for y in y_pred]\n ngrams = list(chain(*[[gram for gram in nltk.ngrams(y, self.ngram_order)] for y in tokenized_y_pred]))\n score = len(set(ngrams)) / len(ngrams)\n return torch.tensor(score, dtype=torch.float)\n", "source": "the_stack_v2_python_sparse", "source_path": "tlidb/TLiDB/metrics/all_metrics.py", "source_repo": "alon-albalak/TLiDB", "split": "test", "star_events_count": 13} {"blob_id": "d4debfa56e0886cd9149ac9660403776113a9204", "bodies": ["try:\n regexes = await self.nyuki.storage.regexes.get()\nexcept AutoReconnect:\n return Response(status=503)\nreturn Response(regexes)", "request = await request.json()\ntry:\n regex = new_regex(request['title'], request['pattern'])\nexcept KeyError as exc:\n return Response(status=400, body={'error': 'missing parameter {}'.format(exc)})\nexcept re_error as exc:\n return Response(status=400, body={'error': 'invalid regular expression {}'.format(exc), 'error_code': 'invalid_regex'})\ntry:\n await self.nyuki.storage.regexes.insert(regex)\nexcept AutoReconnect:\n return Response(status=503)\nreturn Response(regex)", "try:\n rules = await self.nyuki.storage.regexes.get()\nexcept AutoReconnect:\n return Response(status=503)\nawait self.nyuki.storage.regexes.delete()\nreturn Response(rules)"], "bodies_text": "<|body_start_0|>\n try:\n regexes = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n return Response(regexes)\n<|end_body_0|>\n\n<|body_start_1|>\n request = await request.json()\n try:\n regex = new_regex(request['title'], request['pattern'])\n except KeyError as exc:\n return Response(status=400, body={'error': 'missing parameter {}'.format(exc)})\n except re_error as exc:\n return Response(status=400, body={'error': 'invalid regular expression {}'.format(exc), 'error_code': 'invalid_regex'})\n try:\n await self.nyuki.storage.regexes.insert(regex)\n except AutoReconnect:\n return Response(status=503)\n return Response(regex)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n rules = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n await self.nyuki.storage.regexes.delete()\n return Response(rules)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ApiFactoryRegexes", "detected_licenses": ["MIT", "GPL-1.0-or-later", "LicenseRef-scancode-other-copyleft", "GPL-2.0-or-later", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.1-or-later", "GPL-2.0-only", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-generic-exception", "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ApiFactoryRegexes:\n\n async def get(self, request):\n \"\"\"Return the list of all regexes\"\"\"\n <|body_0|>\n\n async def put(self, request):\n \"\"\"Insert a new regex\"\"\"\n <|body_1|>\n\n async def delete(self, request):\n \"\"\"Delete all regexes and return the list\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n regexes = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n return Response(regexes)\n<|end_body_0|>\n\n<|body_start_1|>\n request = await request.json()\n try:\n regex = new_regex(request['title'], request['pattern'])\n except KeyError as exc:\n return Response(status=400, body={'error': 'missing parameter {}'.format(exc)})\n except re_error as exc:\n return Response(status=400, body={'error': 'invalid regular expression {}'.format(exc), 'error_code': 'invalid_regex'})\n try:\n await self.nyuki.storage.regexes.insert(regex)\n except AutoReconnect:\n return Response(status=503)\n return Response(regex)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n rules = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n await self.nyuki.storage.regexes.delete()\n return Response(rules)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000003", "length_bytes": 10772, "license_type": "permissive", "methods": [{"docstring": "Return the list of all regexes", "name": "get", "signature": "async def get(self, request)"}, {"docstring": "Insert a new regex", "name": "put", "signature": "async def put(self, request)"}, {"docstring": "Delete all regexes and return the list", "name": "delete", "signature": "async def delete(self, request)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006582", "prompt": "Implement the Python class `ApiFactoryRegexes` described below.\n\nClass description:\nImplement the ApiFactoryRegexes class.\n\nMethod signatures and docstrings:\n- async def get(self, request): Return the list of all regexes\n- async def put(self, request): Insert a new regex\n- async def delete(self, request): Delete all regexes and return the list", "prompted_full_text": "Implement the Python class `ApiFactoryRegexes` described below.\n\nClass description:\nImplement the ApiFactoryRegexes class.\n\nMethod signatures and docstrings:\n- async def get(self, request): Return the list of all regexes\n- async def put(self, request): Insert a new regex\n- async def delete(self, request): Delete all regexes and return the list\n\n<|skeleton|>\nclass ApiFactoryRegexes:\n\n async def get(self, request):\n \"\"\"Return the list of all regexes\"\"\"\n <|body_0|>\n\n async def put(self, request):\n \"\"\"Insert a new regex\"\"\"\n <|body_1|>\n\n async def delete(self, request):\n \"\"\"Delete all regexes and return the list\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n regexes = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n return Response(regexes)\n<|end_body_0|>\n\n<|body_start_1|>\n request = await request.json()\n try:\n regex = new_regex(request['title'], request['pattern'])\n except KeyError as exc:\n return Response(status=400, body={'error': 'missing parameter {}'.format(exc)})\n except re_error as exc:\n return Response(status=400, body={'error': 'invalid regular expression {}'.format(exc), 'error_code': 'invalid_regex'})\n try:\n await self.nyuki.storage.regexes.insert(regex)\n except AutoReconnect:\n return Response(status=503)\n return Response(regex)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n rules = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n await self.nyuki.storage.regexes.delete()\n return Response(rules)\n<|end_body_2|>\n", "revision_id": "f185fababee380660930243515652093855acfe7", "skeleton": "<|skeleton|>\nclass ApiFactoryRegexes:\n\n async def get(self, request):\n \"\"\"Return the list of all regexes\"\"\"\n <|body_0|>\n\n async def put(self, request):\n \"\"\"Insert a new regex\"\"\"\n <|body_1|>\n\n async def delete(self, request):\n \"\"\"Delete all regexes and return the list\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ApiFactoryRegexes:\n async def get(self, request):\n \"\"\"Return the list of all regexes\"\"\"\n try:\n regexes = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n return Response(regexes)\n\n async def put(self, request):\n \"\"\"Insert a new regex\"\"\"\n request = await request.json()\n try:\n regex = new_regex(request['title'], request['pattern'])\n except KeyError as exc:\n return Response(status=400, body={'error': 'missing parameter {}'.format(exc)})\n except re_error as exc:\n return Response(status=400, body={'error': 'invalid regular expression {}'.format(exc), 'error_code': 'invalid_regex'})\n try:\n await self.nyuki.storage.regexes.insert(regex)\n except AutoReconnect:\n return Response(status=503)\n return Response(regex)\n\n async def delete(self, request):\n \"\"\"Delete all regexes and return the list\"\"\"\n try:\n rules = await self.nyuki.storage.regexes.get()\n except AutoReconnect:\n return Response(status=503)\n await self.nyuki.storage.regexes.delete()\n return Response(rules)\n", "source": "the_stack_v2_python_sparse", "source_path": "nyuki/workflow/api/factory.py", "source_repo": "d-nery/nyuki", "split": "test", "star_events_count": 0} {"blob_id": "cd5bb3a8698e8979d56e330af1a563d0b4d9bd2e", "bodies": ["super(RCAN, self).__init__()\nn_resgroups = args.n_resgroups\nn_resblocks = args.n_resblocks\nn_feats = args.n_feats\nkernel_size = 3\nreduction = args.reduction\nscale = args.scale\nself.dytpe = mstype.float16\nrgb_mean = (0.4488, 0.4371, 0.404)\nrgb_std = (1.0, 1.0, 1.0)\nself.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std)\nmodules_head = [conv(args.n_colors, n_feats, kernel_size)]\nmodules_body = [ResidualGroup(conv, n_feats, kernel_size, reduction, n_resblocks=n_resblocks) for _ in range(n_resgroups)]\nmodules_body.append(conv(n_feats, n_feats, kernel_size))\nmodules_tail = [Upsampler(conv, scale, n_feats), conv(n_feats, args.n_colors, kernel_size)]\nself.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)\nself.head = nn.SequentialCell(modules_head)\nself.body = nn.SequentialCell(modules_body)\nself.tail = nn.SequentialCell(modules_tail)", "x = self.sub_mean(x)\nx = self.head(x)\nres = self.body(x)\nres += x\nx = self.tail(res)\nx = self.add_mean(x)\nreturn x", "own_param = self.parameters_dict()\nfor name, new_param in new_param_dict.items():\n if len(name) >= 4 and name[:4] == 'net.':\n name = name[4:]\n if name in own_param:\n if isinstance(new_param, Parameter):\n param = own_param[name]\n if tuple(param.data.shape) == tuple(new_param.data.shape):\n param.set_data(type(param.data)(new_param.data))\n elif name.find('tail') == -1:\n raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_param[name].shape, new_param.shape))\n elif strict:\n if name.find('tail') == -1:\n raise KeyError('unexpected key \"{}\" in parameters_dict()'.format(name))"], "bodies_text": "<|body_start_0|>\n super(RCAN, self).__init__()\n n_resgroups = args.n_resgroups\n n_resblocks = args.n_resblocks\n n_feats = args.n_feats\n kernel_size = 3\n reduction = args.reduction\n scale = args.scale\n self.dytpe = mstype.float16\n rgb_mean = (0.4488, 0.4371, 0.404)\n rgb_std = (1.0, 1.0, 1.0)\n self.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std)\n modules_head = [conv(args.n_colors, n_feats, kernel_size)]\n modules_body = [ResidualGroup(conv, n_feats, kernel_size, reduction, n_resblocks=n_resblocks) for _ in range(n_resgroups)]\n modules_body.append(conv(n_feats, n_feats, kernel_size))\n modules_tail = [Upsampler(conv, scale, n_feats), conv(n_feats, args.n_colors, kernel_size)]\n self.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)\n self.head = nn.SequentialCell(modules_head)\n self.body = nn.SequentialCell(modules_body)\n self.tail = nn.SequentialCell(modules_tail)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.sub_mean(x)\n x = self.head(x)\n res = self.body(x)\n res += x\n x = self.tail(res)\n x = self.add_mean(x)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n own_param = self.parameters_dict()\n for name, new_param in new_param_dict.items():\n if len(name) >= 4 and name[:4] == 'net.':\n name = name[4:]\n if name in own_param:\n if isinstance(new_param, Parameter):\n param = own_param[name]\n if tuple(param.data.shape) == tuple(new_param.data.shape):\n param.set_data(type(param.data)(new_param.data))\n elif name.find('tail') == -1:\n raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_param[name].shape, new_param.shape))\n elif strict:\n if name.find('tail') == -1:\n raise KeyError('unexpected key \"{}\" in parameters_dict()'.format(name))\n<|end_body_2|>\n", "class_docstring": "rcan", "class_name": "RCAN", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RCAN:\n \"\"\"rcan\"\"\"\n\n def __init__(self, args, conv=default_conv):\n \"\"\"rcan\"\"\"\n <|body_0|>\n\n def construct(self, x):\n \"\"\"rcan\"\"\"\n <|body_1|>\n\n def load_pre_trained_param_dict(self, new_param_dict, strict=True):\n \"\"\"load pre_trained param dict from rcan_x2\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RCAN, self).__init__()\n n_resgroups = args.n_resgroups\n n_resblocks = args.n_resblocks\n n_feats = args.n_feats\n kernel_size = 3\n reduction = args.reduction\n scale = args.scale\n self.dytpe = mstype.float16\n rgb_mean = (0.4488, 0.4371, 0.404)\n rgb_std = (1.0, 1.0, 1.0)\n self.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std)\n modules_head = [conv(args.n_colors, n_feats, kernel_size)]\n modules_body = [ResidualGroup(conv, n_feats, kernel_size, reduction, n_resblocks=n_resblocks) for _ in range(n_resgroups)]\n modules_body.append(conv(n_feats, n_feats, kernel_size))\n modules_tail = [Upsampler(conv, scale, n_feats), conv(n_feats, args.n_colors, kernel_size)]\n self.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)\n self.head = nn.SequentialCell(modules_head)\n self.body = nn.SequentialCell(modules_body)\n self.tail = nn.SequentialCell(modules_tail)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.sub_mean(x)\n x = self.head(x)\n res = self.body(x)\n res += x\n x = self.tail(res)\n x = self.add_mean(x)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n own_param = self.parameters_dict()\n for name, new_param in new_param_dict.items():\n if len(name) >= 4 and name[:4] == 'net.':\n name = name[4:]\n if name in own_param:\n if isinstance(new_param, Parameter):\n param = own_param[name]\n if tuple(param.data.shape) == tuple(new_param.data.shape):\n param.set_data(type(param.data)(new_param.data))\n elif name.find('tail') == -1:\n raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_param[name].shape, new_param.shape))\n elif strict:\n if name.find('tail') == -1:\n raise KeyError('unexpected key \"{}\" in parameters_dict()'.format(name))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000004", "length_bytes": 8312, "license_type": "permissive", "methods": [{"docstring": "rcan", "name": "__init__", "signature": "def __init__(self, args, conv=default_conv)"}, {"docstring": "rcan", "name": "construct", "signature": "def construct(self, x)"}, {"docstring": "load pre_trained param dict from rcan_x2", "name": "load_pre_trained_param_dict", "signature": "def load_pre_trained_param_dict(self, new_param_dict, strict=True)"}], "n_methods": 3, "prompt": "Implement the Python class `RCAN` described below.\n\nClass description:\nrcan\n\nMethod signatures and docstrings:\n- def __init__(self, args, conv=default_conv): rcan\n- def construct(self, x): rcan\n- def load_pre_trained_param_dict(self, new_param_dict, strict=True): load pre_trained param dict from rcan_x2", "prompted_full_text": "Implement the Python class `RCAN` described below.\n\nClass description:\nrcan\n\nMethod signatures and docstrings:\n- def __init__(self, args, conv=default_conv): rcan\n- def construct(self, x): rcan\n- def load_pre_trained_param_dict(self, new_param_dict, strict=True): load pre_trained param dict from rcan_x2\n\n<|skeleton|>\nclass RCAN:\n \"\"\"rcan\"\"\"\n\n def __init__(self, args, conv=default_conv):\n \"\"\"rcan\"\"\"\n <|body_0|>\n\n def construct(self, x):\n \"\"\"rcan\"\"\"\n <|body_1|>\n\n def load_pre_trained_param_dict(self, new_param_dict, strict=True):\n \"\"\"load pre_trained param dict from rcan_x2\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(RCAN, self).__init__()\n n_resgroups = args.n_resgroups\n n_resblocks = args.n_resblocks\n n_feats = args.n_feats\n kernel_size = 3\n reduction = args.reduction\n scale = args.scale\n self.dytpe = mstype.float16\n rgb_mean = (0.4488, 0.4371, 0.404)\n rgb_std = (1.0, 1.0, 1.0)\n self.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std)\n modules_head = [conv(args.n_colors, n_feats, kernel_size)]\n modules_body = [ResidualGroup(conv, n_feats, kernel_size, reduction, n_resblocks=n_resblocks) for _ in range(n_resgroups)]\n modules_body.append(conv(n_feats, n_feats, kernel_size))\n modules_tail = [Upsampler(conv, scale, n_feats), conv(n_feats, args.n_colors, kernel_size)]\n self.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)\n self.head = nn.SequentialCell(modules_head)\n self.body = nn.SequentialCell(modules_body)\n self.tail = nn.SequentialCell(modules_tail)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.sub_mean(x)\n x = self.head(x)\n res = self.body(x)\n res += x\n x = self.tail(res)\n x = self.add_mean(x)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n own_param = self.parameters_dict()\n for name, new_param in new_param_dict.items():\n if len(name) >= 4 and name[:4] == 'net.':\n name = name[4:]\n if name in own_param:\n if isinstance(new_param, Parameter):\n param = own_param[name]\n if tuple(param.data.shape) == tuple(new_param.data.shape):\n param.set_data(type(param.data)(new_param.data))\n elif name.find('tail') == -1:\n raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_param[name].shape, new_param.shape))\n elif strict:\n if name.find('tail') == -1:\n raise KeyError('unexpected key \"{}\" in parameters_dict()'.format(name))\n<|end_body_2|>\n", "revision_id": "eab643f51336dbf7d711f02d27e6516e5affee59", "skeleton": "<|skeleton|>\nclass RCAN:\n \"\"\"rcan\"\"\"\n\n def __init__(self, args, conv=default_conv):\n \"\"\"rcan\"\"\"\n <|body_0|>\n\n def construct(self, x):\n \"\"\"rcan\"\"\"\n <|body_1|>\n\n def load_pre_trained_param_dict(self, new_param_dict, strict=True):\n \"\"\"load pre_trained param dict from rcan_x2\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RCAN:\n \"\"\"rcan\"\"\"\n\n def __init__(self, args, conv=default_conv):\n \"\"\"rcan\"\"\"\n super(RCAN, self).__init__()\n n_resgroups = args.n_resgroups\n n_resblocks = args.n_resblocks\n n_feats = args.n_feats\n kernel_size = 3\n reduction = args.reduction\n scale = args.scale\n self.dytpe = mstype.float16\n rgb_mean = (0.4488, 0.4371, 0.404)\n rgb_std = (1.0, 1.0, 1.0)\n self.sub_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std)\n modules_head = [conv(args.n_colors, n_feats, kernel_size)]\n modules_body = [ResidualGroup(conv, n_feats, kernel_size, reduction, n_resblocks=n_resblocks) for _ in range(n_resgroups)]\n modules_body.append(conv(n_feats, n_feats, kernel_size))\n modules_tail = [Upsampler(conv, scale, n_feats), conv(n_feats, args.n_colors, kernel_size)]\n self.add_mean = MeanShift(args.rgb_range, rgb_mean, rgb_std, 1)\n self.head = nn.SequentialCell(modules_head)\n self.body = nn.SequentialCell(modules_body)\n self.tail = nn.SequentialCell(modules_tail)\n\n def construct(self, x):\n \"\"\"rcan\"\"\"\n x = self.sub_mean(x)\n x = self.head(x)\n res = self.body(x)\n res += x\n x = self.tail(res)\n x = self.add_mean(x)\n return x\n\n def load_pre_trained_param_dict(self, new_param_dict, strict=True):\n \"\"\"load pre_trained param dict from rcan_x2\"\"\"\n own_param = self.parameters_dict()\n for name, new_param in new_param_dict.items():\n if len(name) >= 4 and name[:4] == 'net.':\n name = name[4:]\n if name in own_param:\n if isinstance(new_param, Parameter):\n param = own_param[name]\n if tuple(param.data.shape) == tuple(new_param.data.shape):\n param.set_data(type(param.data)(new_param.data))\n elif name.find('tail') == -1:\n raise RuntimeError('While copying the parameter named {}, whose dimensions in the model are {} and whose dimensions in the checkpoint are {}.'.format(name, own_param[name].shape, new_param.shape))\n elif strict:\n if name.find('tail') == -1:\n raise KeyError('unexpected key \"{}\" in parameters_dict()'.format(name))\n", "source": "the_stack_v2_python_sparse", "source_path": "research/cv/RCAN/src/rcan_model.py", "source_repo": "mindspore-ai/models", "split": "test", "star_events_count": 301} {"blob_id": "efb8a617260fbc048291db9233f27df8d0211c05", "bodies": ["if context is None:\n context = self.context\ndefaults = [fti.getId() for fti in self.getDefaultAddableTypes(context)]\nreturn [x for x in types if x in defaults]", "if context is None:\n context = self.context\nmode = self.getConstrainTypesMode()\ndefault_addable = self.getDefaultAddableTypes(context)\nif mode == DISABLED:\n return default_addable\nelif mode == ENABLED:\n if hasattr(self.context, 'locally_allowed_types'):\n return [t for t in default_addable if t.getId() in self.context.locally_allowed_types]\n else:\n return default_addable\nelif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return_tids = self._filterByDefaults(parent_constrain_adapter.getLocallyAllowedTypes(context), context)\n return [t for t in default_addable if t.getId() in return_tids]\nelse:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)", "if context is None:\n context = self.context\nmode = self.getConstrainTypesMode()\ndefault_addable = [t.getId() for t in self.getDefaultAddableTypes(context)]\nif mode == DISABLED:\n return default_addable\nelif mode == ENABLED:\n if hasattr(self.context, 'immediately_addable_types'):\n return self._filterByDefaults(self.context.immediately_addable_types, context)\nelif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return self._filterByDefaults(parent_constrain_adapter.getImmediatelyAddableTypes(context), context)\nelse:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)"], "bodies_text": "<|body_start_0|>\n if context is None:\n context = self.context\n defaults = [fti.getId() for fti in self.getDefaultAddableTypes(context)]\n return [x for x in types if x in defaults]\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = self.getDefaultAddableTypes(context)\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'locally_allowed_types'):\n return [t for t in default_addable if t.getId() in self.context.locally_allowed_types]\n else:\n return default_addable\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return_tids = self._filterByDefaults(parent_constrain_adapter.getLocallyAllowedTypes(context), context)\n return [t for t in default_addable if t.getId() in return_tids]\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = [t.getId() for t in self.getDefaultAddableTypes(context)]\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'immediately_addable_types'):\n return self._filterByDefaults(self.context.immediately_addable_types, context)\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return self._filterByDefaults(parent_constrain_adapter.getImmediatelyAddableTypes(context), context)\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "gwConstrainTypesBehavior", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass gwConstrainTypesBehavior:\n\n def _filterByDefaults(self, types, context=None):\n \"\"\"Filter the given types by the items which would also be allowed by default. Important, else users could circumvent security restritions\"\"\"\n <|body_0|>\n\n def allowedContentTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally allowed types. If the setting is ACQUIRE, acquire the locally allowed types according to the ACQUIRE rules, described in the interface. If constraints are disabled, use the default addable types This method returns the FTI, NOT the FTI id, like most other methods.\"\"\"\n <|body_1|>\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally immediately addable tpes. If the setting is ACQUIRE, acquire the immediately addable types from the parent, according to the rules described in the interface. If constraints are disabled, use the default addable types\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if context is None:\n context = self.context\n defaults = [fti.getId() for fti in self.getDefaultAddableTypes(context)]\n return [x for x in types if x in defaults]\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = self.getDefaultAddableTypes(context)\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'locally_allowed_types'):\n return [t for t in default_addable if t.getId() in self.context.locally_allowed_types]\n else:\n return default_addable\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return_tids = self._filterByDefaults(parent_constrain_adapter.getLocallyAllowedTypes(context), context)\n return [t for t in default_addable if t.getId() in return_tids]\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = [t.getId() for t in self.getDefaultAddableTypes(context)]\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'immediately_addable_types'):\n return self._filterByDefaults(self.context.immediately_addable_types, context)\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return self._filterByDefaults(parent_constrain_adapter.getImmediatelyAddableTypes(context), context)\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000005", "length_bytes": 4164, "license_type": "no_license", "methods": [{"docstring": "Filter the given types by the items which would also be allowed by default. Important, else users could circumvent security restritions", "name": "_filterByDefaults", "signature": "def _filterByDefaults(self, types, context=None)"}, {"docstring": "If constraints are enabled, return the locally allowed types. If the setting is ACQUIRE, acquire the locally allowed types according to the ACQUIRE rules, described in the interface. If constraints are disabled, use the default addable types This method returns the FTI, NOT the FTI id, like most other methods.", "name": "allowedContentTypes", "signature": "def allowedContentTypes(self, context=None)"}, {"docstring": "If constraints are enabled, return the locally immediately addable tpes. If the setting is ACQUIRE, acquire the immediately addable types from the parent, according to the rules described in the interface. If constraints are disabled, use the default addable types", "name": "getImmediatelyAddableTypes", "signature": "def getImmediatelyAddableTypes(self, context=None)"}], "n_methods": 3, "prompt": "Implement the Python class `gwConstrainTypesBehavior` described below.\n\nClass description:\nImplement the gwConstrainTypesBehavior class.\n\nMethod signatures and docstrings:\n- def _filterByDefaults(self, types, context=None): Filter the given types by the items which would also be allowed by default. Important, else users could circumvent security restritions\n- def allowedContentTypes(self, context=None): If constraints are enabled, return the locally allowed types. If the setting is ACQUIRE, acquire the locally allowed types according to the ACQUIRE rules, described in the interface. If constraints are disabled, use the default addable types This method returns the FTI, NOT the FTI id, like most other methods.\n- def getImmediatelyAddableTypes(self, context=None): If constraints are enabled, return the locally immediately addable tpes. If the setting is ACQUIRE, acquire the immediately addable types from the parent, according to the rules described in the interface. If constraints are disabled, use the default addable types", "prompted_full_text": "Implement the Python class `gwConstrainTypesBehavior` described below.\n\nClass description:\nImplement the gwConstrainTypesBehavior class.\n\nMethod signatures and docstrings:\n- def _filterByDefaults(self, types, context=None): Filter the given types by the items which would also be allowed by default. Important, else users could circumvent security restritions\n- def allowedContentTypes(self, context=None): If constraints are enabled, return the locally allowed types. If the setting is ACQUIRE, acquire the locally allowed types according to the ACQUIRE rules, described in the interface. If constraints are disabled, use the default addable types This method returns the FTI, NOT the FTI id, like most other methods.\n- def getImmediatelyAddableTypes(self, context=None): If constraints are enabled, return the locally immediately addable tpes. If the setting is ACQUIRE, acquire the immediately addable types from the parent, according to the rules described in the interface. If constraints are disabled, use the default addable types\n\n<|skeleton|>\nclass gwConstrainTypesBehavior:\n\n def _filterByDefaults(self, types, context=None):\n \"\"\"Filter the given types by the items which would also be allowed by default. Important, else users could circumvent security restritions\"\"\"\n <|body_0|>\n\n def allowedContentTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally allowed types. If the setting is ACQUIRE, acquire the locally allowed types according to the ACQUIRE rules, described in the interface. If constraints are disabled, use the default addable types This method returns the FTI, NOT the FTI id, like most other methods.\"\"\"\n <|body_1|>\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally immediately addable tpes. If the setting is ACQUIRE, acquire the immediately addable types from the parent, according to the rules described in the interface. If constraints are disabled, use the default addable types\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if context is None:\n context = self.context\n defaults = [fti.getId() for fti in self.getDefaultAddableTypes(context)]\n return [x for x in types if x in defaults]\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = self.getDefaultAddableTypes(context)\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'locally_allowed_types'):\n return [t for t in default_addable if t.getId() in self.context.locally_allowed_types]\n else:\n return default_addable\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return_tids = self._filterByDefaults(parent_constrain_adapter.getLocallyAllowedTypes(context), context)\n return [t for t in default_addable if t.getId() in return_tids]\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n<|end_body_1|>\n\n<|body_start_2|>\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = [t.getId() for t in self.getDefaultAddableTypes(context)]\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'immediately_addable_types'):\n return self._filterByDefaults(self.context.immediately_addable_types, context)\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return self._filterByDefaults(parent_constrain_adapter.getImmediatelyAddableTypes(context), context)\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n<|end_body_2|>\n", "revision_id": "d0b1c0db3ae4b355ab9d694b8392b73b65036d1e", "skeleton": "<|skeleton|>\nclass gwConstrainTypesBehavior:\n\n def _filterByDefaults(self, types, context=None):\n \"\"\"Filter the given types by the items which would also be allowed by default. Important, else users could circumvent security restritions\"\"\"\n <|body_0|>\n\n def allowedContentTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally allowed types. If the setting is ACQUIRE, acquire the locally allowed types according to the ACQUIRE rules, described in the interface. If constraints are disabled, use the default addable types This method returns the FTI, NOT the FTI id, like most other methods.\"\"\"\n <|body_1|>\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally immediately addable tpes. If the setting is ACQUIRE, acquire the immediately addable types from the parent, according to the rules described in the interface. If constraints are disabled, use the default addable types\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class gwConstrainTypesBehavior:\n def _filterByDefaults(self, types, context=None):\n \"\"\"Filter the given types by the items which would also be allowed by default. Important, else users could circumvent security restritions\"\"\"\n if context is None:\n context = self.context\n defaults = [fti.getId() for fti in self.getDefaultAddableTypes(context)]\n return [x for x in types if x in defaults]\n\n def allowedContentTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally allowed types. If the setting is ACQUIRE, acquire the locally allowed types according to the ACQUIRE rules, described in the interface. If constraints are disabled, use the default addable types This method returns the FTI, NOT the FTI id, like most other methods.\"\"\"\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = self.getDefaultAddableTypes(context)\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'locally_allowed_types'):\n return [t for t in default_addable if t.getId() in self.context.locally_allowed_types]\n else:\n return default_addable\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return_tids = self._filterByDefaults(parent_constrain_adapter.getLocallyAllowedTypes(context), context)\n return [t for t in default_addable if t.getId() in return_tids]\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n\n def getImmediatelyAddableTypes(self, context=None):\n \"\"\"If constraints are enabled, return the locally immediately addable tpes. If the setting is ACQUIRE, acquire the immediately addable types from the parent, according to the rules described in the interface. If constraints are disabled, use the default addable types\"\"\"\n if context is None:\n context = self.context\n mode = self.getConstrainTypesMode()\n default_addable = [t.getId() for t in self.getDefaultAddableTypes(context)]\n if mode == DISABLED:\n return default_addable\n elif mode == ENABLED:\n if hasattr(self.context, 'immediately_addable_types'):\n return self._filterByDefaults(self.context.immediately_addable_types, context)\n elif mode == ACQUIRE:\n parent = self.context.__parent__\n parent_constrain_adapter = ISelectableConstrainTypes(parent, None)\n if not parent_constrain_adapter:\n return default_addable\n return self._filterByDefaults(parent_constrain_adapter.getImmediatelyAddableTypes(context), context)\n else:\n raise Exception('Wrong constraint setting. %i is an invalid value', mode)\n", "source": "the_stack_v2_python_sparse", "source_path": "genweb/core/overrides.py", "source_repo": "UPCnet/genweb.core", "split": "test", "star_events_count": 2} {"blob_id": "8bb053396bfc84bed1b2e1f9f358852fc1435138", "bodies": ["super(LabelSmoothingLoss, self).__init__()\nself.confidence = 1.0 - smoothing\nself.smoothing = smoothing\nself.dim = dim", "with torch.no_grad():\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (pred.shape[-1] - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\nreturn torch.mean(torch.sum(-true_dist * pred, dim=self.dim))"], "bodies_text": "<|body_start_0|>\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.dim = dim\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (pred.shape[-1] - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n<|end_body_1|>\n", "class_docstring": "Create a NLL loss but by employing label smoothing. Particularly useful when the data is not 100% reliable, and there is a probability that other labels can actually be the real one. Credits to github users @ https://github.com/pytorch/pytorch/issues/7455", "class_name": "LabelSmoothingLoss", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LabelSmoothingLoss:\n \"\"\"Create a NLL loss but by employing label smoothing. Particularly useful when the data is not 100% reliable, and there is a probability that other labels can actually be the real one. Credits to github users @ https://github.com/pytorch/pytorch/issues/7455\"\"\"\n\n def __init__(self, smoothing=0.0, dim=-1):\n \"\"\"Create the label smoothing loss :param smoothing: smooting probability (0 for no smoothing, 1 for full smoothing) :param dim: dimension to apply the loss sum\"\"\"\n <|body_0|>\n\n def forward(self, pred, target):\n \"\"\"Compute the forward pass, to compute the loss :param pred: predictions from a model :param target: target data (ground truth) :return: the overall mean of the loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.dim = dim\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (pred.shape[-1] - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000006", "length_bytes": 17400, "license_type": "permissive", "methods": [{"docstring": "Create the label smoothing loss :param smoothing: smooting probability (0 for no smoothing, 1 for full smoothing) :param dim: dimension to apply the loss sum", "name": "__init__", "signature": "def __init__(self, smoothing=0.0, dim=-1)"}, {"docstring": "Compute the forward pass, to compute the loss :param pred: predictions from a model :param target: target data (ground truth) :return: the overall mean of the loss", "name": "forward", "signature": "def forward(self, pred, target)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_023053", "prompt": "Implement the Python class `LabelSmoothingLoss` described below.\n\nClass description:\nCreate a NLL loss but by employing label smoothing. Particularly useful when the data is not 100% reliable, and there is a probability that other labels can actually be the real one. Credits to github users @ https://github.com/pytorch/pytorch/issues/7455\n\nMethod signatures and docstrings:\n- def __init__(self, smoothing=0.0, dim=-1): Create the label smoothing loss :param smoothing: smooting probability (0 for no smoothing, 1 for full smoothing) :param dim: dimension to apply the loss sum\n- def forward(self, pred, target): Compute the forward pass, to compute the loss :param pred: predictions from a model :param target: target data (ground truth) :return: the overall mean of the loss", "prompted_full_text": "Implement the Python class `LabelSmoothingLoss` described below.\n\nClass description:\nCreate a NLL loss but by employing label smoothing. Particularly useful when the data is not 100% reliable, and there is a probability that other labels can actually be the real one. Credits to github users @ https://github.com/pytorch/pytorch/issues/7455\n\nMethod signatures and docstrings:\n- def __init__(self, smoothing=0.0, dim=-1): Create the label smoothing loss :param smoothing: smooting probability (0 for no smoothing, 1 for full smoothing) :param dim: dimension to apply the loss sum\n- def forward(self, pred, target): Compute the forward pass, to compute the loss :param pred: predictions from a model :param target: target data (ground truth) :return: the overall mean of the loss\n\n<|skeleton|>\nclass LabelSmoothingLoss:\n \"\"\"Create a NLL loss but by employing label smoothing. Particularly useful when the data is not 100% reliable, and there is a probability that other labels can actually be the real one. Credits to github users @ https://github.com/pytorch/pytorch/issues/7455\"\"\"\n\n def __init__(self, smoothing=0.0, dim=-1):\n \"\"\"Create the label smoothing loss :param smoothing: smooting probability (0 for no smoothing, 1 for full smoothing) :param dim: dimension to apply the loss sum\"\"\"\n <|body_0|>\n\n def forward(self, pred, target):\n \"\"\"Compute the forward pass, to compute the loss :param pred: predictions from a model :param target: target data (ground truth) :return: the overall mean of the loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.dim = dim\n<|end_body_0|>\n\n<|body_start_1|>\n with torch.no_grad():\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (pred.shape[-1] - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n<|end_body_1|>\n", "revision_id": "1b9fbe6c89c74dc706fd8d3b11ea08977ba2c1d3", "skeleton": "<|skeleton|>\nclass LabelSmoothingLoss:\n \"\"\"Create a NLL loss but by employing label smoothing. Particularly useful when the data is not 100% reliable, and there is a probability that other labels can actually be the real one. Credits to github users @ https://github.com/pytorch/pytorch/issues/7455\"\"\"\n\n def __init__(self, smoothing=0.0, dim=-1):\n \"\"\"Create the label smoothing loss :param smoothing: smooting probability (0 for no smoothing, 1 for full smoothing) :param dim: dimension to apply the loss sum\"\"\"\n <|body_0|>\n\n def forward(self, pred, target):\n \"\"\"Compute the forward pass, to compute the loss :param pred: predictions from a model :param target: target data (ground truth) :return: the overall mean of the loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LabelSmoothingLoss:\n \"\"\"Create a NLL loss but by employing label smoothing. Particularly useful when the data is not 100% reliable, and there is a probability that other labels can actually be the real one. Credits to github users @ https://github.com/pytorch/pytorch/issues/7455\"\"\"\n\n def __init__(self, smoothing=0.0, dim=-1):\n \"\"\"Create the label smoothing loss :param smoothing: smooting probability (0 for no smoothing, 1 for full smoothing) :param dim: dimension to apply the loss sum\"\"\"\n super(LabelSmoothingLoss, self).__init__()\n self.confidence = 1.0 - smoothing\n self.smoothing = smoothing\n self.dim = dim\n\n def forward(self, pred, target):\n \"\"\"Compute the forward pass, to compute the loss :param pred: predictions from a model :param target: target data (ground truth) :return: the overall mean of the loss\"\"\"\n with torch.no_grad():\n true_dist = torch.zeros_like(pred)\n true_dist.fill_(self.smoothing / (pred.shape[-1] - 1))\n true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)\n return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))\n", "source": "the_stack_v2_python_sparse", "source_path": "models/interaction_modules/train_aux.py", "source_repo": "pedro-mgb/pedestrian-arc-lstm-smf", "split": "test", "star_events_count": 4} {"blob_id": "88419b381e84e266ed7b834b99a16df2852fe6fd", "bodies": ["if not out:\n return\nprint('[{}]: {}'.format(tag, message))", "if not debug:\n return\nprint('[{}]: {}'.format(tag, message))"], "bodies_text": "<|body_start_0|>\n if not out:\n return\n print('[{}]: {}'.format(tag, message))\n<|end_body_0|>\n\n<|body_start_1|>\n if not debug:\n return\n print('[{}]: {}'.format(tag, message))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GeneralLogger", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GeneralLogger:\n\n def o(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'out' = true, else do nothing\"\"\"\n <|body_0|>\n\n def d(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'debug' = true, else do nothing\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not out:\n return\n print('[{}]: {}'.format(tag, message))\n<|end_body_0|>\n\n<|body_start_1|>\n if not debug:\n return\n print('[{}]: {}'.format(tag, message))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000007", "length_bytes": 2014, "license_type": "no_license", "methods": [{"docstring": "out message with tag of specified if config 'out' = true, else do nothing", "name": "o", "signature": "def o(cls, message, tag=None)"}, {"docstring": "out message with tag of specified if config 'debug' = true, else do nothing", "name": "d", "signature": "def d(cls, message, tag=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028683", "prompt": "Implement the Python class `GeneralLogger` described below.\n\nClass description:\nImplement the GeneralLogger class.\n\nMethod signatures and docstrings:\n- def o(cls, message, tag=None): out message with tag of specified if config 'out' = true, else do nothing\n- def d(cls, message, tag=None): out message with tag of specified if config 'debug' = true, else do nothing", "prompted_full_text": "Implement the Python class `GeneralLogger` described below.\n\nClass description:\nImplement the GeneralLogger class.\n\nMethod signatures and docstrings:\n- def o(cls, message, tag=None): out message with tag of specified if config 'out' = true, else do nothing\n- def d(cls, message, tag=None): out message with tag of specified if config 'debug' = true, else do nothing\n\n<|skeleton|>\nclass GeneralLogger:\n\n def o(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'out' = true, else do nothing\"\"\"\n <|body_0|>\n\n def d(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'debug' = true, else do nothing\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not out:\n return\n print('[{}]: {}'.format(tag, message))\n<|end_body_0|>\n\n<|body_start_1|>\n if not debug:\n return\n print('[{}]: {}'.format(tag, message))\n<|end_body_1|>\n", "revision_id": "13d5ef9100bbc09f1b6d4b57b868833449ea9699", "skeleton": "<|skeleton|>\nclass GeneralLogger:\n\n def o(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'out' = true, else do nothing\"\"\"\n <|body_0|>\n\n def d(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'debug' = true, else do nothing\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GeneralLogger:\n def o(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'out' = true, else do nothing\"\"\"\n if not out:\n return\n print('[{}]: {}'.format(tag, message))\n\n def d(cls, message, tag=None):\n \"\"\"out message with tag of specified if config 'debug' = true, else do nothing\"\"\"\n if not debug:\n return\n print('[{}]: {}'.format(tag, message))\n", "source": "the_stack_v2_python_sparse", "source_path": "AppLogger.py", "source_repo": "omarsgalal/PlayWithYourHand", "split": "test", "star_events_count": 4} {"blob_id": "8edf555bc54bc48fb0a9822b63c04602dc87859d", "bodies": ["self.data = data\nif data is None:\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\nelse:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n ' Normal approximation '\n mean = float(sum(data) / len(data))\n a = 0\n for i in data:\n a += (i - mean) ** 2\n var = a / len(data)\n self.n = round(mean ** 2 / (mean - var))\n self.p = mean / self.n", "fact = 1\nfor i in range(2, k + 1):\n fact = fact * i\nreturn fact", "if not isinstance(k, int):\n k = int(k)\nif k < 0:\n return 0\nfactor = self.fact(self.n) / (self.fact(k) * self.fact(self.n - k))\npmf = factor * self.p ** k * (1 - self.p) ** (self.n - k)\nreturn pmf", "if not isinstance(k, int):\n k = int(k)\nif k < 0:\n return 0\na = 0\nfor i in range(k + 1):\n a += self.pmf(i)\nreturn a"], "bodies_text": "<|body_start_0|>\n self.data = data\n if data is None:\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n ' Normal approximation '\n mean = float(sum(data) / len(data))\n a = 0\n for i in data:\n a += (i - mean) ** 2\n var = a / len(data)\n self.n = round(mean ** 2 / (mean - var))\n self.p = mean / self.n\n<|end_body_0|>\n\n<|body_start_1|>\n fact = 1\n for i in range(2, k + 1):\n fact = fact * i\n return fact\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n factor = self.fact(self.n) / (self.fact(k) * self.fact(self.n - k))\n pmf = factor * self.p ** k * (1 - self.p) ** (self.n - k)\n return pmf\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n a = 0\n for i in range(k + 1):\n a += self.pmf(i)\n return a\n<|end_body_3|>\n", "class_docstring": "represent an Binomial distribution", "class_name": "Binomial", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Binomial:\n \"\"\"represent an Binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class contructor\"\"\"\n <|body_0|>\n\n def fact(self, k):\n \"\"\"factorial\"\"\"\n <|body_1|>\n\n def pmf(self, k):\n \"\"\"Calculate the value of the PMF for a given number of successes\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"Calculate the value of the CDF for a given number of successes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = data\n if data is None:\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n ' Normal approximation '\n mean = float(sum(data) / len(data))\n a = 0\n for i in data:\n a += (i - mean) ** 2\n var = a / len(data)\n self.n = round(mean ** 2 / (mean - var))\n self.p = mean / self.n\n<|end_body_0|>\n\n<|body_start_1|>\n fact = 1\n for i in range(2, k + 1):\n fact = fact * i\n return fact\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n factor = self.fact(self.n) / (self.fact(k) * self.fact(self.n - k))\n pmf = factor * self.p ** k * (1 - self.p) ** (self.n - k)\n return pmf\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n a = 0\n for i in range(k + 1):\n a += self.pmf(i)\n return a\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000008", "length_bytes": 1863, "license_type": "no_license", "methods": [{"docstring": "Class contructor", "name": "__init__", "signature": "def __init__(self, data=None, n=1, p=0.5)"}, {"docstring": "factorial", "name": "fact", "signature": "def fact(self, k)"}, {"docstring": "Calculate the value of the PMF for a given number of successes", "name": "pmf", "signature": "def pmf(self, k)"}, {"docstring": "Calculate the value of the CDF for a given number of successes", "name": "cdf", "signature": "def cdf(self, k)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_000960", "prompt": "Implement the Python class `Binomial` described below.\n\nClass description:\nrepresent an Binomial distribution\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, n=1, p=0.5): Class contructor\n- def fact(self, k): factorial\n- def pmf(self, k): Calculate the value of the PMF for a given number of successes\n- def cdf(self, k): Calculate the value of the CDF for a given number of successes", "prompted_full_text": "Implement the Python class `Binomial` described below.\n\nClass description:\nrepresent an Binomial distribution\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, n=1, p=0.5): Class contructor\n- def fact(self, k): factorial\n- def pmf(self, k): Calculate the value of the PMF for a given number of successes\n- def cdf(self, k): Calculate the value of the CDF for a given number of successes\n\n<|skeleton|>\nclass Binomial:\n \"\"\"represent an Binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class contructor\"\"\"\n <|body_0|>\n\n def fact(self, k):\n \"\"\"factorial\"\"\"\n <|body_1|>\n\n def pmf(self, k):\n \"\"\"Calculate the value of the PMF for a given number of successes\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"Calculate the value of the CDF for a given number of successes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = data\n if data is None:\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n ' Normal approximation '\n mean = float(sum(data) / len(data))\n a = 0\n for i in data:\n a += (i - mean) ** 2\n var = a / len(data)\n self.n = round(mean ** 2 / (mean - var))\n self.p = mean / self.n\n<|end_body_0|>\n\n<|body_start_1|>\n fact = 1\n for i in range(2, k + 1):\n fact = fact * i\n return fact\n<|end_body_1|>\n\n<|body_start_2|>\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n factor = self.fact(self.n) / (self.fact(k) * self.fact(self.n - k))\n pmf = factor * self.p ** k * (1 - self.p) ** (self.n - k)\n return pmf\n<|end_body_2|>\n\n<|body_start_3|>\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n a = 0\n for i in range(k + 1):\n a += self.pmf(i)\n return a\n<|end_body_3|>\n", "revision_id": "16dc37d1c6dc00a271053b60724c51763914029a", "skeleton": "<|skeleton|>\nclass Binomial:\n \"\"\"represent an Binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class contructor\"\"\"\n <|body_0|>\n\n def fact(self, k):\n \"\"\"factorial\"\"\"\n <|body_1|>\n\n def pmf(self, k):\n \"\"\"Calculate the value of the PMF for a given number of successes\"\"\"\n <|body_2|>\n\n def cdf(self, k):\n \"\"\"Calculate the value of the CDF for a given number of successes\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Binomial:\n \"\"\"represent an Binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Class contructor\"\"\"\n self.data = data\n if data is None:\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p <= 0 or p >= 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) <= 2:\n raise ValueError('data must contain multiple values')\n ' Normal approximation '\n mean = float(sum(data) / len(data))\n a = 0\n for i in data:\n a += (i - mean) ** 2\n var = a / len(data)\n self.n = round(mean ** 2 / (mean - var))\n self.p = mean / self.n\n\n def fact(self, k):\n \"\"\"factorial\"\"\"\n fact = 1\n for i in range(2, k + 1):\n fact = fact * i\n return fact\n\n def pmf(self, k):\n \"\"\"Calculate the value of the PMF for a given number of successes\"\"\"\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n factor = self.fact(self.n) / (self.fact(k) * self.fact(self.n - k))\n pmf = factor * self.p ** k * (1 - self.p) ** (self.n - k)\n return pmf\n\n def cdf(self, k):\n \"\"\"Calculate the value of the CDF for a given number of successes\"\"\"\n if not isinstance(k, int):\n k = int(k)\n if k < 0:\n return 0\n a = 0\n for i in range(k + 1):\n a += self.pmf(i)\n return a\n", "source": "the_stack_v2_python_sparse", "source_path": "math/0x03-probability/binomial.py", "source_repo": "jaycer95/holbertonschool-machine_learning", "split": "test", "star_events_count": 0} {"blob_id": "95cd12e8e35bc21dae0b08d8dcac3c00c5e00970", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn WindowsAppXAppAssignmentSettings()", "from .mobile_app_assignment_settings import MobileAppAssignmentSettings\nfrom .mobile_app_assignment_settings import MobileAppAssignmentSettings\nfields: Dict[str, Callable[[Any], None]] = {'useDeviceContext': lambda n: setattr(self, 'use_device_context', n.get_bool_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_bool_value('useDeviceContext', self.use_device_context)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return WindowsAppXAppAssignmentSettings()\n<|end_body_0|>\n\n<|body_start_1|>\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n fields: Dict[str, Callable[[Any], None]] = {'useDeviceContext': lambda n: setattr(self, 'use_device_context', n.get_bool_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bool_value('useDeviceContext', self.use_device_context)\n<|end_body_2|>\n", "class_docstring": "Contains properties used when assigning a Windows AppX mobile app to a group.", "class_name": "WindowsAppXAppAssignmentSettings", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WindowsAppXAppAssignmentSettings:\n \"\"\"Contains properties used when assigning a Windows AppX mobile app to a group.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> WindowsAppXAppAssignmentSettings:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: WindowsAppXAppAssignmentSettings\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return WindowsAppXAppAssignmentSettings()\n<|end_body_0|>\n\n<|body_start_1|>\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n fields: Dict[str, Callable[[Any], None]] = {'useDeviceContext': lambda n: setattr(self, 'use_device_context', n.get_bool_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bool_value('useDeviceContext', self.use_device_context)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000009", "length_bytes": 2660, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: WindowsAppXAppAssignmentSettings", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> WindowsAppXAppAssignmentSettings"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_013970", "prompt": "Implement the Python class `WindowsAppXAppAssignmentSettings` described below.\n\nClass description:\nContains properties used when assigning a Windows AppX mobile app to a group.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> WindowsAppXAppAssignmentSettings: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: WindowsAppXAppAssignmentSettings\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `WindowsAppXAppAssignmentSettings` described below.\n\nClass description:\nContains properties used when assigning a Windows AppX mobile app to a group.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> WindowsAppXAppAssignmentSettings: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: WindowsAppXAppAssignmentSettings\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass WindowsAppXAppAssignmentSettings:\n \"\"\"Contains properties used when assigning a Windows AppX mobile app to a group.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> WindowsAppXAppAssignmentSettings:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: WindowsAppXAppAssignmentSettings\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return WindowsAppXAppAssignmentSettings()\n<|end_body_0|>\n\n<|body_start_1|>\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n fields: Dict[str, Callable[[Any], None]] = {'useDeviceContext': lambda n: setattr(self, 'use_device_context', n.get_bool_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bool_value('useDeviceContext', self.use_device_context)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass WindowsAppXAppAssignmentSettings:\n \"\"\"Contains properties used when assigning a Windows AppX mobile app to a group.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> WindowsAppXAppAssignmentSettings:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: WindowsAppXAppAssignmentSettings\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WindowsAppXAppAssignmentSettings:\n \"\"\"Contains properties used when assigning a Windows AppX mobile app to a group.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> WindowsAppXAppAssignmentSettings:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: WindowsAppXAppAssignmentSettings\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return WindowsAppXAppAssignmentSettings()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n from .mobile_app_assignment_settings import MobileAppAssignmentSettings\n fields: Dict[str, Callable[[Any], None]] = {'useDeviceContext': lambda n: setattr(self, 'use_device_context', n.get_bool_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_bool_value('useDeviceContext', self.use_device_context)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/windows_app_x_app_assignment_settings.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "11a72443dfdce4db6dd71ccd35e37422d9a7c62d", "bodies": ["if value is self.field.missing_value:\n return []\nconverter = self._get_converter(self.field.value_type)\nreturn [converter.to_widget_value(v) for v in value]", "if len(value) == 0:\n return self.field.missing_value\nconverter = self._get_converter(self.field.value_type)\nvalues = [converter.to_field_value(v) for v in value]\ncollection_type = self.field._type\nreturn collection_type(values)"], "bodies_text": "<|body_start_0|>\n if value is self.field.missing_value:\n return []\n converter = self._get_converter(self.field.value_type)\n return [converter.to_widget_value(v) for v in value]\n<|end_body_0|>\n\n<|body_start_1|>\n if len(value) == 0:\n return self.field.missing_value\n converter = self._get_converter(self.field.value_type)\n values = [converter.to_field_value(v) for v in value]\n collection_type = self.field._type\n return collection_type(values)\n<|end_body_1|>\n", "class_docstring": "Data converter for IMultiWidget.", "class_name": "MultiConverter", "detected_licenses": ["ZPL-2.1"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiConverter:\n \"\"\"Data converter for IMultiWidget.\"\"\"\n\n def to_widget_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n <|body_0|>\n\n def to_field_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is self.field.missing_value:\n return []\n converter = self._get_converter(self.field.value_type)\n return [converter.to_widget_value(v) for v in value]\n<|end_body_0|>\n\n<|body_start_1|>\n if len(value) == 0:\n return self.field.missing_value\n converter = self._get_converter(self.field.value_type)\n values = [converter.to_field_value(v) for v in value]\n collection_type = self.field._type\n return collection_type(values)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000010", "length_bytes": 16755, "license_type": "permissive", "methods": [{"docstring": "Just dispatch it.", "name": "to_widget_value", "signature": "def to_widget_value(self, value)"}, {"docstring": "Just dispatch it.", "name": "to_field_value", "signature": "def to_field_value(self, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000726", "prompt": "Implement the Python class `MultiConverter` described below.\n\nClass description:\nData converter for IMultiWidget.\n\nMethod signatures and docstrings:\n- def to_widget_value(self, value): Just dispatch it.\n- def to_field_value(self, value): Just dispatch it.", "prompted_full_text": "Implement the Python class `MultiConverter` described below.\n\nClass description:\nData converter for IMultiWidget.\n\nMethod signatures and docstrings:\n- def to_widget_value(self, value): Just dispatch it.\n- def to_field_value(self, value): Just dispatch it.\n\n<|skeleton|>\nclass MultiConverter:\n \"\"\"Data converter for IMultiWidget.\"\"\"\n\n def to_widget_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n <|body_0|>\n\n def to_field_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if value is self.field.missing_value:\n return []\n converter = self._get_converter(self.field.value_type)\n return [converter.to_widget_value(v) for v in value]\n<|end_body_0|>\n\n<|body_start_1|>\n if len(value) == 0:\n return self.field.missing_value\n converter = self._get_converter(self.field.value_type)\n values = [converter.to_field_value(v) for v in value]\n collection_type = self.field._type\n return collection_type(values)\n<|end_body_1|>\n", "revision_id": "e83e2ce314355f98eaf66e90ad6feccbda7934f9", "skeleton": "<|skeleton|>\nclass MultiConverter:\n \"\"\"Data converter for IMultiWidget.\"\"\"\n\n def to_widget_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n <|body_0|>\n\n def to_field_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MultiConverter:\n \"\"\"Data converter for IMultiWidget.\"\"\"\n\n def to_widget_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n if value is self.field.missing_value:\n return []\n converter = self._get_converter(self.field.value_type)\n return [converter.to_widget_value(v) for v in value]\n\n def to_field_value(self, value):\n \"\"\"Just dispatch it.\"\"\"\n if len(value) == 0:\n return self.field.missing_value\n converter = self._get_converter(self.field.value_type)\n values = [converter.to_field_value(v) for v in value]\n collection_type = self.field._type\n return collection_type(values)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pyams_form/converter.py", "source_repo": "Py-AMS/pyams-form", "split": "test", "star_events_count": 0} {"blob_id": "1ded29b013b3c8fc349828d6d63c7b41d569efb8", "bodies": ["super(AutoAugmentation, self).__init__(n_level)\nself.policies = policies\nself.n_select = n_select", "chosen_policies = random.sample(self.policies, k=self.n_select)\nfor name, pr, level in chain.from_iterable(chosen_policies):\n if random.random() > pr:\n continue\n img = self._apply_augment(img, name, level)\nreturn img"], "bodies_text": "<|body_start_0|>\n super(AutoAugmentation, self).__init__(n_level)\n self.policies = policies\n self.n_select = n_select\n<|end_body_0|>\n\n<|body_start_1|>\n chosen_policies = random.sample(self.policies, k=self.n_select)\n for name, pr, level in chain.from_iterable(chosen_policies):\n if random.random() > pr:\n continue\n img = self._apply_augment(img, name, level)\n return img\n<|end_body_1|>\n", "class_docstring": "Auto augmentation class. References: https://arxiv.org/pdf/1805.09501.pdf", "class_name": "AutoAugmentation", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AutoAugmentation:\n \"\"\"Auto augmentation class. References: https://arxiv.org/pdf/1805.09501.pdf\"\"\"\n\n def __init__(self, policies: List[List[Tuple[str, float, int]]], n_select: int=1, n_level: int=10) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def __call__(self, img: Image) -> Image:\n \"\"\"Run augmentations.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(AutoAugmentation, self).__init__(n_level)\n self.policies = policies\n self.n_select = n_select\n<|end_body_0|>\n\n<|body_start_1|>\n chosen_policies = random.sample(self.policies, k=self.n_select)\n for name, pr, level in chain.from_iterable(chosen_policies):\n if random.random() > pr:\n continue\n img = self._apply_augment(img, name, level)\n return img\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000011", "length_bytes": 5467, "license_type": "permissive", "methods": [{"docstring": "Initialize.", "name": "__init__", "signature": "def __init__(self, policies: List[List[Tuple[str, float, int]]], n_select: int=1, n_level: int=10) -> None"}, {"docstring": "Run augmentations.", "name": "__call__", "signature": "def __call__(self, img: Image) -> Image"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_019342", "prompt": "Implement the Python class `AutoAugmentation` described below.\n\nClass description:\nAuto augmentation class. References: https://arxiv.org/pdf/1805.09501.pdf\n\nMethod signatures and docstrings:\n- def __init__(self, policies: List[List[Tuple[str, float, int]]], n_select: int=1, n_level: int=10) -> None: Initialize.\n- def __call__(self, img: Image) -> Image: Run augmentations.", "prompted_full_text": "Implement the Python class `AutoAugmentation` described below.\n\nClass description:\nAuto augmentation class. References: https://arxiv.org/pdf/1805.09501.pdf\n\nMethod signatures and docstrings:\n- def __init__(self, policies: List[List[Tuple[str, float, int]]], n_select: int=1, n_level: int=10) -> None: Initialize.\n- def __call__(self, img: Image) -> Image: Run augmentations.\n\n<|skeleton|>\nclass AutoAugmentation:\n \"\"\"Auto augmentation class. References: https://arxiv.org/pdf/1805.09501.pdf\"\"\"\n\n def __init__(self, policies: List[List[Tuple[str, float, int]]], n_select: int=1, n_level: int=10) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def __call__(self, img: Image) -> Image:\n \"\"\"Run augmentations.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(AutoAugmentation, self).__init__(n_level)\n self.policies = policies\n self.n_select = n_select\n<|end_body_0|>\n\n<|body_start_1|>\n chosen_policies = random.sample(self.policies, k=self.n_select)\n for name, pr, level in chain.from_iterable(chosen_policies):\n if random.random() > pr:\n continue\n img = self._apply_augment(img, name, level)\n return img\n<|end_body_1|>\n", "revision_id": "88bcff70e93dd68058a5cf0dfeac119a57abc6de", "skeleton": "<|skeleton|>\nclass AutoAugmentation:\n \"\"\"Auto augmentation class. References: https://arxiv.org/pdf/1805.09501.pdf\"\"\"\n\n def __init__(self, policies: List[List[Tuple[str, float, int]]], n_select: int=1, n_level: int=10) -> None:\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def __call__(self, img: Image) -> Image:\n \"\"\"Run augmentations.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AutoAugmentation:\n \"\"\"Auto augmentation class. References: https://arxiv.org/pdf/1805.09501.pdf\"\"\"\n\n def __init__(self, policies: List[List[Tuple[str, float, int]]], n_select: int=1, n_level: int=10) -> None:\n \"\"\"Initialize.\"\"\"\n super(AutoAugmentation, self).__init__(n_level)\n self.policies = policies\n self.n_select = n_select\n\n def __call__(self, img: Image) -> Image:\n \"\"\"Run augmentations.\"\"\"\n chosen_policies = random.sample(self.policies, k=self.n_select)\n for name, pr, level in chain.from_iterable(chosen_policies):\n if random.random() > pr:\n continue\n img = self._apply_augment(img, name, level)\n return img\n", "source": "the_stack_v2_python_sparse", "source_path": "src/augmentation/methods.py", "source_repo": "scott-mao/DenseDepth_Pruning", "split": "test", "star_events_count": 1} {"blob_id": "0960020f36e83c50446ef6ea1006b6c5531c6f8d", "bodies": ["RefTester = ROOT.RefTester\na = std.vector(RefTester)()\na.push_back(RefTester(42))\nself.assertEqual(len(a), 1)\nself.assertEqual(a[0].m_i, 42)\na[0] = RefTester(33)\nself.assertEqual(len(a), 1)\nself.assertEqual(a[0].m_i, 33)", "RefTesterNoAssign = ROOT.RefTesterNoAssign\na = RefTesterNoAssign()\nself.assertEqual(type(a), type(a[0]))\nself.assertRaises(TypeError, a.__setitem__, 0, RefTesterNoAssign())\ntry:\n a[0] = RefTesterNoAssign()\nexcept TypeError as e:\n if not legacy_pyroot:\n self.assertTrue('cannot assign' in str(e))\n else:\n self.assertTrue('can not assign' in str(e))"], "bodies_text": "<|body_start_0|>\n RefTester = ROOT.RefTester\n a = std.vector(RefTester)()\n a.push_back(RefTester(42))\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 42)\n a[0] = RefTester(33)\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 33)\n<|end_body_0|>\n\n<|body_start_1|>\n RefTesterNoAssign = ROOT.RefTesterNoAssign\n a = RefTesterNoAssign()\n self.assertEqual(type(a), type(a[0]))\n self.assertRaises(TypeError, a.__setitem__, 0, RefTesterNoAssign())\n try:\n a[0] = RefTesterNoAssign()\n except TypeError as e:\n if not legacy_pyroot:\n self.assertTrue('cannot assign' in str(e))\n else:\n self.assertTrue('can not assign' in str(e))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Cpp05AssignToRefArbitraryClass", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Cpp05AssignToRefArbitraryClass:\n\n def test1AssignToReturnByRef(self):\n \"\"\"Test assignment to an instance returned by reference\"\"\"\n <|body_0|>\n\n def test2NiceErrorMessageReturnByRef(self):\n \"\"\"Want nice error message of failing assign by reference\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n RefTester = ROOT.RefTester\n a = std.vector(RefTester)()\n a.push_back(RefTester(42))\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 42)\n a[0] = RefTester(33)\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 33)\n<|end_body_0|>\n\n<|body_start_1|>\n RefTesterNoAssign = ROOT.RefTesterNoAssign\n a = RefTesterNoAssign()\n self.assertEqual(type(a), type(a[0]))\n self.assertRaises(TypeError, a.__setitem__, 0, RefTesterNoAssign())\n try:\n a[0] = RefTesterNoAssign()\n except TypeError as e:\n if not legacy_pyroot:\n self.assertTrue('cannot assign' in str(e))\n else:\n self.assertTrue('can not assign' in str(e))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000012", "length_bytes": 30462, "license_type": "no_license", "methods": [{"docstring": "Test assignment to an instance returned by reference", "name": "test1AssignToReturnByRef", "signature": "def test1AssignToReturnByRef(self)"}, {"docstring": "Want nice error message of failing assign by reference", "name": "test2NiceErrorMessageReturnByRef", "signature": "def test2NiceErrorMessageReturnByRef(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031979", "prompt": "Implement the Python class `Cpp05AssignToRefArbitraryClass` described below.\n\nClass description:\nImplement the Cpp05AssignToRefArbitraryClass class.\n\nMethod signatures and docstrings:\n- def test1AssignToReturnByRef(self): Test assignment to an instance returned by reference\n- def test2NiceErrorMessageReturnByRef(self): Want nice error message of failing assign by reference", "prompted_full_text": "Implement the Python class `Cpp05AssignToRefArbitraryClass` described below.\n\nClass description:\nImplement the Cpp05AssignToRefArbitraryClass class.\n\nMethod signatures and docstrings:\n- def test1AssignToReturnByRef(self): Test assignment to an instance returned by reference\n- def test2NiceErrorMessageReturnByRef(self): Want nice error message of failing assign by reference\n\n<|skeleton|>\nclass Cpp05AssignToRefArbitraryClass:\n\n def test1AssignToReturnByRef(self):\n \"\"\"Test assignment to an instance returned by reference\"\"\"\n <|body_0|>\n\n def test2NiceErrorMessageReturnByRef(self):\n \"\"\"Want nice error message of failing assign by reference\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n RefTester = ROOT.RefTester\n a = std.vector(RefTester)()\n a.push_back(RefTester(42))\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 42)\n a[0] = RefTester(33)\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 33)\n<|end_body_0|>\n\n<|body_start_1|>\n RefTesterNoAssign = ROOT.RefTesterNoAssign\n a = RefTesterNoAssign()\n self.assertEqual(type(a), type(a[0]))\n self.assertRaises(TypeError, a.__setitem__, 0, RefTesterNoAssign())\n try:\n a[0] = RefTesterNoAssign()\n except TypeError as e:\n if not legacy_pyroot:\n self.assertTrue('cannot assign' in str(e))\n else:\n self.assertTrue('can not assign' in str(e))\n<|end_body_1|>\n", "revision_id": "134508460915282a5d82d6cbbb6e6afa14653413", "skeleton": "<|skeleton|>\nclass Cpp05AssignToRefArbitraryClass:\n\n def test1AssignToReturnByRef(self):\n \"\"\"Test assignment to an instance returned by reference\"\"\"\n <|body_0|>\n\n def test2NiceErrorMessageReturnByRef(self):\n \"\"\"Want nice error message of failing assign by reference\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Cpp05AssignToRefArbitraryClass:\n def test1AssignToReturnByRef(self):\n \"\"\"Test assignment to an instance returned by reference\"\"\"\n RefTester = ROOT.RefTester\n a = std.vector(RefTester)()\n a.push_back(RefTester(42))\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 42)\n a[0] = RefTester(33)\n self.assertEqual(len(a), 1)\n self.assertEqual(a[0].m_i, 33)\n\n def test2NiceErrorMessageReturnByRef(self):\n \"\"\"Want nice error message of failing assign by reference\"\"\"\n RefTesterNoAssign = ROOT.RefTesterNoAssign\n a = RefTesterNoAssign()\n self.assertEqual(type(a), type(a[0]))\n self.assertRaises(TypeError, a.__setitem__, 0, RefTesterNoAssign())\n try:\n a[0] = RefTesterNoAssign()\n except TypeError as e:\n if not legacy_pyroot:\n self.assertTrue('cannot assign' in str(e))\n else:\n self.assertTrue('can not assign' in str(e))\n", "source": "the_stack_v2_python_sparse", "source_path": "python/cpp/PyROOT_advancedtests.py", "source_repo": "root-project/roottest", "split": "test", "star_events_count": 41} {"blob_id": "b6a79dd37c7806e84fd3e2e2ed7201ba89d519df", "bodies": ["create_url = f'https://qyapi.weixin.qq.com/cgi-bin/department/create?access_token={self.token}'\n'\\n 1、把接口请求信息封装到字典中\\n 2、接口中不需要再引入requests\\n '\nreq = {'method': 'post', 'url': create_url, 'json': data}\nr = self.send_api(req)\nreturn r.json()", "url = f'https://qyapi.weixin.qq.com/cgi-bin/department/update?access_token={self.token}'\nreq = {'method': 'post', 'url': url, 'json': data}\nr = self.send_api(req)\nreturn r.json()", "url = f'https://qyapi.weixin.qq.com/cgi-bin/department/delete?access_token={self.token}&id={depart_id}'\nreq = {'method': 'get', 'url': url}\nr = self.send_api(req)\nreturn r.json()", "url = f'https://qyapi.weixin.qq.com/cgi-bin/department/list?access_token={self.token}'\nreq = {'method': 'get', 'url': url}\nr = self.send_api(req)\nreturn r.json()", "department_info = self.get_department()\nid_list = Utils.base_jsonpath(department_info, '$..id')\nfor i in id_list:\n if i != 1:\n self.delete_department(i)"], "bodies_text": "<|body_start_0|>\n create_url = f'https://qyapi.weixin.qq.com/cgi-bin/department/create?access_token={self.token}'\n '\\n 1、把接口请求信息封装到字典中\\n 2、接口中不需要再引入requests\\n '\n req = {'method': 'post', 'url': create_url, 'json': data}\n r = self.send_api(req)\n return r.json()\n<|end_body_0|>\n\n<|body_start_1|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/update?access_token={self.token}'\n req = {'method': 'post', 'url': url, 'json': data}\n r = self.send_api(req)\n return r.json()\n<|end_body_1|>\n\n<|body_start_2|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/delete?access_token={self.token}&id={depart_id}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n<|end_body_2|>\n\n<|body_start_3|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/list?access_token={self.token}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n<|end_body_3|>\n\n<|body_start_4|>\n department_info = self.get_department()\n id_list = Utils.base_jsonpath(department_info, '$..id')\n for i in id_list:\n if i != 1:\n self.delete_department(i)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Department", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Department:\n\n def creat_department(self, data):\n \"\"\"创建部门 :return:创建部门接口的响应\"\"\"\n <|body_0|>\n\n def update_department(self, data):\n \"\"\"更新部门信息 :return:更新部门接口的响应\"\"\"\n <|body_1|>\n\n def delete_department(self, depart_id):\n \"\"\"删除部门信息 :return:删除部门接口的响应\"\"\"\n <|body_2|>\n\n def get_department(self):\n \"\"\"获取部门列表 :return:获取部门接口的响应\"\"\"\n <|body_3|>\n\n def clear_departments(self):\n \"\"\"清理已经存在的部门信息\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n create_url = f'https://qyapi.weixin.qq.com/cgi-bin/department/create?access_token={self.token}'\n '\\n 1、把接口请求信息封装到字典中\\n 2、接口中不需要再引入requests\\n '\n req = {'method': 'post', 'url': create_url, 'json': data}\n r = self.send_api(req)\n return r.json()\n<|end_body_0|>\n\n<|body_start_1|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/update?access_token={self.token}'\n req = {'method': 'post', 'url': url, 'json': data}\n r = self.send_api(req)\n return r.json()\n<|end_body_1|>\n\n<|body_start_2|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/delete?access_token={self.token}&id={depart_id}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n<|end_body_2|>\n\n<|body_start_3|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/list?access_token={self.token}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n<|end_body_3|>\n\n<|body_start_4|>\n department_info = self.get_department()\n id_list = Utils.base_jsonpath(department_info, '$..id')\n for i in id_list:\n if i != 1:\n self.delete_department(i)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000013", "length_bytes": 2542, "license_type": "no_license", "methods": [{"docstring": "创建部门 :return:创建部门接口的响应", "name": "creat_department", "signature": "def creat_department(self, data)"}, {"docstring": "更新部门信息 :return:更新部门接口的响应", "name": "update_department", "signature": "def update_department(self, data)"}, {"docstring": "删除部门信息 :return:删除部门接口的响应", "name": "delete_department", "signature": "def delete_department(self, depart_id)"}, {"docstring": "获取部门列表 :return:获取部门接口的响应", "name": "get_department", "signature": "def get_department(self)"}, {"docstring": "清理已经存在的部门信息", "name": "clear_departments", "signature": "def clear_departments(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_052383", "prompt": "Implement the Python class `Department` described below.\n\nClass description:\nImplement the Department class.\n\nMethod signatures and docstrings:\n- def creat_department(self, data): 创建部门 :return:创建部门接口的响应\n- def update_department(self, data): 更新部门信息 :return:更新部门接口的响应\n- def delete_department(self, depart_id): 删除部门信息 :return:删除部门接口的响应\n- def get_department(self): 获取部门列表 :return:获取部门接口的响应\n- def clear_departments(self): 清理已经存在的部门信息", "prompted_full_text": "Implement the Python class `Department` described below.\n\nClass description:\nImplement the Department class.\n\nMethod signatures and docstrings:\n- def creat_department(self, data): 创建部门 :return:创建部门接口的响应\n- def update_department(self, data): 更新部门信息 :return:更新部门接口的响应\n- def delete_department(self, depart_id): 删除部门信息 :return:删除部门接口的响应\n- def get_department(self): 获取部门列表 :return:获取部门接口的响应\n- def clear_departments(self): 清理已经存在的部门信息\n\n<|skeleton|>\nclass Department:\n\n def creat_department(self, data):\n \"\"\"创建部门 :return:创建部门接口的响应\"\"\"\n <|body_0|>\n\n def update_department(self, data):\n \"\"\"更新部门信息 :return:更新部门接口的响应\"\"\"\n <|body_1|>\n\n def delete_department(self, depart_id):\n \"\"\"删除部门信息 :return:删除部门接口的响应\"\"\"\n <|body_2|>\n\n def get_department(self):\n \"\"\"获取部门列表 :return:获取部门接口的响应\"\"\"\n <|body_3|>\n\n def clear_departments(self):\n \"\"\"清理已经存在的部门信息\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n create_url = f'https://qyapi.weixin.qq.com/cgi-bin/department/create?access_token={self.token}'\n '\\n 1、把接口请求信息封装到字典中\\n 2、接口中不需要再引入requests\\n '\n req = {'method': 'post', 'url': create_url, 'json': data}\n r = self.send_api(req)\n return r.json()\n<|end_body_0|>\n\n<|body_start_1|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/update?access_token={self.token}'\n req = {'method': 'post', 'url': url, 'json': data}\n r = self.send_api(req)\n return r.json()\n<|end_body_1|>\n\n<|body_start_2|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/delete?access_token={self.token}&id={depart_id}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n<|end_body_2|>\n\n<|body_start_3|>\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/list?access_token={self.token}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n<|end_body_3|>\n\n<|body_start_4|>\n department_info = self.get_department()\n id_list = Utils.base_jsonpath(department_info, '$..id')\n for i in id_list:\n if i != 1:\n self.delete_department(i)\n<|end_body_4|>\n", "revision_id": "cf66ffa38cb7196a533b89d9b31313a270a7cd84", "skeleton": "<|skeleton|>\nclass Department:\n\n def creat_department(self, data):\n \"\"\"创建部门 :return:创建部门接口的响应\"\"\"\n <|body_0|>\n\n def update_department(self, data):\n \"\"\"更新部门信息 :return:更新部门接口的响应\"\"\"\n <|body_1|>\n\n def delete_department(self, depart_id):\n \"\"\"删除部门信息 :return:删除部门接口的响应\"\"\"\n <|body_2|>\n\n def get_department(self):\n \"\"\"获取部门列表 :return:获取部门接口的响应\"\"\"\n <|body_3|>\n\n def clear_departments(self):\n \"\"\"清理已经存在的部门信息\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Department:\n def creat_department(self, data):\n \"\"\"创建部门 :return:创建部门接口的响应\"\"\"\n create_url = f'https://qyapi.weixin.qq.com/cgi-bin/department/create?access_token={self.token}'\n '\\n 1、把接口请求信息封装到字典中\\n 2、接口中不需要再引入requests\\n '\n req = {'method': 'post', 'url': create_url, 'json': data}\n r = self.send_api(req)\n return r.json()\n\n def update_department(self, data):\n \"\"\"更新部门信息 :return:更新部门接口的响应\"\"\"\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/update?access_token={self.token}'\n req = {'method': 'post', 'url': url, 'json': data}\n r = self.send_api(req)\n return r.json()\n\n def delete_department(self, depart_id):\n \"\"\"删除部门信息 :return:删除部门接口的响应\"\"\"\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/delete?access_token={self.token}&id={depart_id}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n\n def get_department(self):\n \"\"\"获取部门列表 :return:获取部门接口的响应\"\"\"\n url = f'https://qyapi.weixin.qq.com/cgi-bin/department/list?access_token={self.token}'\n req = {'method': 'get', 'url': url}\n r = self.send_api(req)\n return r.json()\n\n def clear_departments(self):\n \"\"\"清理已经存在的部门信息\"\"\"\n department_info = self.get_department()\n id_list = Utils.base_jsonpath(department_info, '$..id')\n for i in id_list:\n if i != 1:\n self.delete_department(i)\n", "source": "the_stack_v2_python_sparse", "source_path": "w_service/apis/department.py", "source_repo": "yfgbamboo/wework", "split": "test", "star_events_count": 0} {"blob_id": "f1e38d8bfd7cb327c54ab28359356e724ec92b09", "bodies": ["input_json = request.data\noutput_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'SessionDetails', 'Payload'], [input_json['AvailabilityDetails'], input_json['AuthenticationDetails'], input_json['SessionDetails'], None]))\ntry:\n json_params = input_json['APIParams']\n json_params['profile_id'] = input_json['SessionDetails']['Payload']['profile_id']\n output_json['Payload'] = self.create_notification_json(json_params)\n return Response(output_json)\nexcept Exception as ex:\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return Response(output_json)", "input_json = request\noutput_json = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Notification is created successfully', dict()]))\ntry:\n create_invoice_var = create_notification(input_json)\n match = re.findall(\"'Status': 'Failure'\", str(create_invoice_var))\n if match:\n return create_invoice_var\n output_json['Payload'] = create_invoice_var['Payload']\n return output_json\nexcept Exception as ex:\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return output_json"], "bodies_text": "<|body_start_0|>\n input_json = request.data\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'SessionDetails', 'Payload'], [input_json['AvailabilityDetails'], input_json['AuthenticationDetails'], input_json['SessionDetails'], None]))\n try:\n json_params = input_json['APIParams']\n json_params['profile_id'] = input_json['SessionDetails']['Payload']['profile_id']\n output_json['Payload'] = self.create_notification_json(json_params)\n return Response(output_json)\n except Exception as ex:\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return Response(output_json)\n<|end_body_0|>\n\n<|body_start_1|>\n input_json = request\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Notification is created successfully', dict()]))\n try:\n create_invoice_var = create_notification(input_json)\n match = re.findall(\"'Status': 'Failure'\", str(create_invoice_var))\n if match:\n return create_invoice_var\n output_json['Payload'] = create_invoice_var['Payload']\n return output_json\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return output_json\n<|end_body_1|>\n", "class_docstring": "This API will create a notification", "class_name": "CreateNotificationAPI", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreateNotificationAPI:\n \"\"\"This API will create a notification\"\"\"\n\n def post(self, request):\n \"\"\"Post function to crete a notification\"\"\"\n <|body_0|>\n\n def create_notification_json(self, request):\n \"\"\"This API will create a notification :param request: { 'notification_text':'new ticket is raised', 'type_id':1, 'distribution_type_id': 1, 'notifier_profile_id': 32/null, 'notified_profile_id':55/null, 'algorithm_id':10/null, 'redirection_url':'url_for_redirection', 'comments':'important comment' } :return\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_json = request.data\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'SessionDetails', 'Payload'], [input_json['AvailabilityDetails'], input_json['AuthenticationDetails'], input_json['SessionDetails'], None]))\n try:\n json_params = input_json['APIParams']\n json_params['profile_id'] = input_json['SessionDetails']['Payload']['profile_id']\n output_json['Payload'] = self.create_notification_json(json_params)\n return Response(output_json)\n except Exception as ex:\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return Response(output_json)\n<|end_body_0|>\n\n<|body_start_1|>\n input_json = request\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Notification is created successfully', dict()]))\n try:\n create_invoice_var = create_notification(input_json)\n match = re.findall(\"'Status': 'Failure'\", str(create_invoice_var))\n if match:\n return create_invoice_var\n output_json['Payload'] = create_invoice_var['Payload']\n return output_json\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return output_json\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000014", "length_bytes": 3034, "license_type": "no_license", "methods": [{"docstring": "Post function to crete a notification", "name": "post", "signature": "def post(self, request)"}, {"docstring": "This API will create a notification :param request: { 'notification_text':'new ticket is raised', 'type_id':1, 'distribution_type_id': 1, 'notifier_profile_id': 32/null, 'notified_profile_id':55/null, 'algorithm_id':10/null, 'redirection_url':'url_for_redirection', 'comments':'important comment' } :return", "name": "create_notification_json", "signature": "def create_notification_json(self, request)"}], "n_methods": 2, "prompt": "Implement the Python class `CreateNotificationAPI` described below.\n\nClass description:\nThis API will create a notification\n\nMethod signatures and docstrings:\n- def post(self, request): Post function to crete a notification\n- def create_notification_json(self, request): This API will create a notification :param request: { 'notification_text':'new ticket is raised', 'type_id':1, 'distribution_type_id': 1, 'notifier_profile_id': 32/null, 'notified_profile_id':55/null, 'algorithm_id':10/null, 'redirection_url':'url_for_redirection', 'comments':'important comment' } :return", "prompted_full_text": "Implement the Python class `CreateNotificationAPI` described below.\n\nClass description:\nThis API will create a notification\n\nMethod signatures and docstrings:\n- def post(self, request): Post function to crete a notification\n- def create_notification_json(self, request): This API will create a notification :param request: { 'notification_text':'new ticket is raised', 'type_id':1, 'distribution_type_id': 1, 'notifier_profile_id': 32/null, 'notified_profile_id':55/null, 'algorithm_id':10/null, 'redirection_url':'url_for_redirection', 'comments':'important comment' } :return\n\n<|skeleton|>\nclass CreateNotificationAPI:\n \"\"\"This API will create a notification\"\"\"\n\n def post(self, request):\n \"\"\"Post function to crete a notification\"\"\"\n <|body_0|>\n\n def create_notification_json(self, request):\n \"\"\"This API will create a notification :param request: { 'notification_text':'new ticket is raised', 'type_id':1, 'distribution_type_id': 1, 'notifier_profile_id': 32/null, 'notified_profile_id':55/null, 'algorithm_id':10/null, 'redirection_url':'url_for_redirection', 'comments':'important comment' } :return\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_json = request.data\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'SessionDetails', 'Payload'], [input_json['AvailabilityDetails'], input_json['AuthenticationDetails'], input_json['SessionDetails'], None]))\n try:\n json_params = input_json['APIParams']\n json_params['profile_id'] = input_json['SessionDetails']['Payload']['profile_id']\n output_json['Payload'] = self.create_notification_json(json_params)\n return Response(output_json)\n except Exception as ex:\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return Response(output_json)\n<|end_body_0|>\n\n<|body_start_1|>\n input_json = request\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Notification is created successfully', dict()]))\n try:\n create_invoice_var = create_notification(input_json)\n match = re.findall(\"'Status': 'Failure'\", str(create_invoice_var))\n if match:\n return create_invoice_var\n output_json['Payload'] = create_invoice_var['Payload']\n return output_json\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return output_json\n<|end_body_1|>\n", "revision_id": "36eb9931f330e64902354c6fc471be2adf4b7049", "skeleton": "<|skeleton|>\nclass CreateNotificationAPI:\n \"\"\"This API will create a notification\"\"\"\n\n def post(self, request):\n \"\"\"Post function to crete a notification\"\"\"\n <|body_0|>\n\n def create_notification_json(self, request):\n \"\"\"This API will create a notification :param request: { 'notification_text':'new ticket is raised', 'type_id':1, 'distribution_type_id': 1, 'notifier_profile_id': 32/null, 'notified_profile_id':55/null, 'algorithm_id':10/null, 'redirection_url':'url_for_redirection', 'comments':'important comment' } :return\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CreateNotificationAPI:\n \"\"\"This API will create a notification\"\"\"\n\n def post(self, request):\n \"\"\"Post function to crete a notification\"\"\"\n input_json = request.data\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'SessionDetails', 'Payload'], [input_json['AvailabilityDetails'], input_json['AuthenticationDetails'], input_json['SessionDetails'], None]))\n try:\n json_params = input_json['APIParams']\n json_params['profile_id'] = input_json['SessionDetails']['Payload']['profile_id']\n output_json['Payload'] = self.create_notification_json(json_params)\n return Response(output_json)\n except Exception as ex:\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return Response(output_json)\n\n def create_notification_json(self, request):\n \"\"\"This API will create a notification :param request: { 'notification_text':'new ticket is raised', 'type_id':1, 'distribution_type_id': 1, 'notifier_profile_id': 32/null, 'notified_profile_id':55/null, 'algorithm_id':10/null, 'redirection_url':'url_for_redirection', 'comments':'important comment' } :return\"\"\"\n input_json = request\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Notification is created successfully', dict()]))\n try:\n create_invoice_var = create_notification(input_json)\n match = re.findall(\"'Status': 'Failure'\", str(create_invoice_var))\n if match:\n return create_invoice_var\n output_json['Payload'] = create_invoice_var['Payload']\n return output_json\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message', 'Payload'], ['Failure', f'Exception Encountered.Something went wrong {ex}', None]))\n return output_json\n", "source": "the_stack_v2_python_sparse", "source_path": "Generic/common/notifications_new/api/create_notification/views_create_notification.py", "source_repo": "archiemb303/common_backend_django", "split": "test", "star_events_count": 0} {"blob_id": "b6f4219ea5c5c6e66dc06b2aed6bdcb8ee040b7f", "bodies": ["new_direct_deposit = DirectDeposit()\nnew_direct_deposit.account_number = request.data['account_number']\nnew_direct_deposit.routing_number = request.data['routing_number']\nnew_direct_deposit.bank_name = request.data['bank_name']\nnew_direct_deposit.employee = employee\nnew_direct_deposit.account_name = request.data['account_name']\nnew_direct_deposit.save()\nserializer = DirectDepositSerializer(new_direct_deposit, context={'request': request})\nreturn Response(serializer.data, status=status.HTTP_201_CREATED)", "try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n serializer = DirectDepositSerializer(direct_deposit, context={'request': request})\n return Response(serializer.data)\nexcept Exception as ex:\n return HttpResponseServerError(ex)", "try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n direct_deposit.delete()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\nexcept DirectDeposit.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\nexcept Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "direct_deposit = DirectDeposit.objects.all()\nserializer = DirectDepositSerializer(direct_deposit, many=True, context={'request': request})\nreturn Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n new_direct_deposit = DirectDeposit()\n new_direct_deposit.account_number = request.data['account_number']\n new_direct_deposit.routing_number = request.data['routing_number']\n new_direct_deposit.bank_name = request.data['bank_name']\n new_direct_deposit.employee = employee\n new_direct_deposit.account_name = request.data['account_name']\n new_direct_deposit.save()\n serializer = DirectDepositSerializer(new_direct_deposit, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n serializer = DirectDepositSerializer(direct_deposit, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n direct_deposit.delete()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except DirectDeposit.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_2|>\n\n<|body_start_3|>\n direct_deposit = DirectDeposit.objects.all()\n serializer = DirectDepositSerializer(direct_deposit, many=True, context={'request': request})\n return Response(serializer.data)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "DirectDeposits", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DirectDeposits:\n\n def create(self, request):\n \"\"\"Handle POST operations Returns: Response -- JSON serialized direct deposit instance\"\"\"\n <|body_0|>\n\n def retrieve(self, request, pk=None):\n \"\"\"Handle GET requests for single deposit accounts\"\"\"\n <|body_1|>\n\n def destroy(self, request, pk=None):\n \"\"\"Handle DELETE requests for a employee deposit account Returns: Response -- 200, 404, or 500 status code\"\"\"\n <|body_2|>\n\n def list(self, request):\n \"\"\"Handle GET requests to direct deposit resource\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n new_direct_deposit = DirectDeposit()\n new_direct_deposit.account_number = request.data['account_number']\n new_direct_deposit.routing_number = request.data['routing_number']\n new_direct_deposit.bank_name = request.data['bank_name']\n new_direct_deposit.employee = employee\n new_direct_deposit.account_name = request.data['account_name']\n new_direct_deposit.save()\n serializer = DirectDepositSerializer(new_direct_deposit, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n serializer = DirectDepositSerializer(direct_deposit, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n direct_deposit.delete()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except DirectDeposit.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_2|>\n\n<|body_start_3|>\n direct_deposit = DirectDeposit.objects.all()\n serializer = DirectDepositSerializer(direct_deposit, many=True, context={'request': request})\n return Response(serializer.data)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000015", "length_bytes": 2915, "license_type": "no_license", "methods": [{"docstring": "Handle POST operations Returns: Response -- JSON serialized direct deposit instance", "name": "create", "signature": "def create(self, request)"}, {"docstring": "Handle GET requests for single deposit accounts", "name": "retrieve", "signature": "def retrieve(self, request, pk=None)"}, {"docstring": "Handle DELETE requests for a employee deposit account Returns: Response -- 200, 404, or 500 status code", "name": "destroy", "signature": "def destroy(self, request, pk=None)"}, {"docstring": "Handle GET requests to direct deposit resource", "name": "list", "signature": "def list(self, request)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_050328", "prompt": "Implement the Python class `DirectDeposits` described below.\n\nClass description:\nImplement the DirectDeposits class.\n\nMethod signatures and docstrings:\n- def create(self, request): Handle POST operations Returns: Response -- JSON serialized direct deposit instance\n- def retrieve(self, request, pk=None): Handle GET requests for single deposit accounts\n- def destroy(self, request, pk=None): Handle DELETE requests for a employee deposit account Returns: Response -- 200, 404, or 500 status code\n- def list(self, request): Handle GET requests to direct deposit resource", "prompted_full_text": "Implement the Python class `DirectDeposits` described below.\n\nClass description:\nImplement the DirectDeposits class.\n\nMethod signatures and docstrings:\n- def create(self, request): Handle POST operations Returns: Response -- JSON serialized direct deposit instance\n- def retrieve(self, request, pk=None): Handle GET requests for single deposit accounts\n- def destroy(self, request, pk=None): Handle DELETE requests for a employee deposit account Returns: Response -- 200, 404, or 500 status code\n- def list(self, request): Handle GET requests to direct deposit resource\n\n<|skeleton|>\nclass DirectDeposits:\n\n def create(self, request):\n \"\"\"Handle POST operations Returns: Response -- JSON serialized direct deposit instance\"\"\"\n <|body_0|>\n\n def retrieve(self, request, pk=None):\n \"\"\"Handle GET requests for single deposit accounts\"\"\"\n <|body_1|>\n\n def destroy(self, request, pk=None):\n \"\"\"Handle DELETE requests for a employee deposit account Returns: Response -- 200, 404, or 500 status code\"\"\"\n <|body_2|>\n\n def list(self, request):\n \"\"\"Handle GET requests to direct deposit resource\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n new_direct_deposit = DirectDeposit()\n new_direct_deposit.account_number = request.data['account_number']\n new_direct_deposit.routing_number = request.data['routing_number']\n new_direct_deposit.bank_name = request.data['bank_name']\n new_direct_deposit.employee = employee\n new_direct_deposit.account_name = request.data['account_name']\n new_direct_deposit.save()\n serializer = DirectDepositSerializer(new_direct_deposit, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n serializer = DirectDepositSerializer(direct_deposit, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n direct_deposit.delete()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except DirectDeposit.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n<|end_body_2|>\n\n<|body_start_3|>\n direct_deposit = DirectDeposit.objects.all()\n serializer = DirectDepositSerializer(direct_deposit, many=True, context={'request': request})\n return Response(serializer.data)\n<|end_body_3|>\n", "revision_id": "640979a97e6efcbab6f20dbff5dbd41e4f77c021", "skeleton": "<|skeleton|>\nclass DirectDeposits:\n\n def create(self, request):\n \"\"\"Handle POST operations Returns: Response -- JSON serialized direct deposit instance\"\"\"\n <|body_0|>\n\n def retrieve(self, request, pk=None):\n \"\"\"Handle GET requests for single deposit accounts\"\"\"\n <|body_1|>\n\n def destroy(self, request, pk=None):\n \"\"\"Handle DELETE requests for a employee deposit account Returns: Response -- 200, 404, or 500 status code\"\"\"\n <|body_2|>\n\n def list(self, request):\n \"\"\"Handle GET requests to direct deposit resource\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DirectDeposits:\n def create(self, request):\n \"\"\"Handle POST operations Returns: Response -- JSON serialized direct deposit instance\"\"\"\n new_direct_deposit = DirectDeposit()\n new_direct_deposit.account_number = request.data['account_number']\n new_direct_deposit.routing_number = request.data['routing_number']\n new_direct_deposit.bank_name = request.data['bank_name']\n new_direct_deposit.employee = employee\n new_direct_deposit.account_name = request.data['account_name']\n new_direct_deposit.save()\n serializer = DirectDepositSerializer(new_direct_deposit, context={'request': request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n\n def retrieve(self, request, pk=None):\n \"\"\"Handle GET requests for single deposit accounts\"\"\"\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n serializer = DirectDepositSerializer(direct_deposit, context={'request': request})\n return Response(serializer.data)\n except Exception as ex:\n return HttpResponseServerError(ex)\n\n def destroy(self, request, pk=None):\n \"\"\"Handle DELETE requests for a employee deposit account Returns: Response -- 200, 404, or 500 status code\"\"\"\n try:\n direct_deposit = DirectDeposit.objects.get(pk=pk)\n direct_deposit.delete()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except DirectDeposit.DoesNotExist as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)\n except Exception as ex:\n return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n def list(self, request):\n \"\"\"Handle GET requests to direct deposit resource\"\"\"\n direct_deposit = DirectDeposit.objects.all()\n serializer = DirectDepositSerializer(direct_deposit, many=True, context={'request': request})\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "capstoneapi/views/directdeposit.py", "source_repo": "medlenmage/capstonebackend", "split": "test", "star_events_count": 0} {"blob_id": "208caad62264c463b2a1cd903ae6a969b0a7bc16", "bodies": ["if dep_type == DependencyType.INACTIVE_INACTIVE:\n return DependencyType.ACTIVE_ACTIVE\nelif dep_type == DependencyType.ACTIVE_ACTIVE:\n return DependencyType.INACTIVE_INACTIVE\nelse:\n return dep_type", "if dep_type == DependencyType.INACTIVE_INACTIVE or dep_type == DependencyType.INACTIVE_ACTIVE:\n return ReluState.INACTIVE\nelse:\n return ReluState.ACTIVE", "if dep == DependencyType.INACTIVE_INACTIVE or dep == DependencyType.ACTIVE_INACTIVE:\n return ReluState.INACTIVE\nelse:\n return ReluState.ACTIVE"], "bodies_text": "<|body_start_0|>\n if dep_type == DependencyType.INACTIVE_INACTIVE:\n return DependencyType.ACTIVE_ACTIVE\n elif dep_type == DependencyType.ACTIVE_ACTIVE:\n return DependencyType.INACTIVE_INACTIVE\n else:\n return dep_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dep_type == DependencyType.INACTIVE_INACTIVE or dep_type == DependencyType.INACTIVE_ACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n<|end_body_1|>\n\n<|body_start_2|>\n if dep == DependencyType.INACTIVE_INACTIVE or dep == DependencyType.ACTIVE_INACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n<|end_body_2|>\n", "class_docstring": "Types of dependencies.", "class_name": "DependencyType", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DependencyType:\n \"\"\"Types of dependencies.\"\"\"\n\n def inverse(dep_type):\n \"\"\"Arguments: dep_type: an instance of DependencyType Returns the symmetric dependency of dep_type\"\"\"\n <|body_0|>\n\n def antecedent(dep_type):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the antecedent node state of the dep_type\"\"\"\n <|body_1|>\n\n def consequent(dep):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the consequent node state of the dep_type\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if dep_type == DependencyType.INACTIVE_INACTIVE:\n return DependencyType.ACTIVE_ACTIVE\n elif dep_type == DependencyType.ACTIVE_ACTIVE:\n return DependencyType.INACTIVE_INACTIVE\n else:\n return dep_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dep_type == DependencyType.INACTIVE_INACTIVE or dep_type == DependencyType.INACTIVE_ACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n<|end_body_1|>\n\n<|body_start_2|>\n if dep == DependencyType.INACTIVE_INACTIVE or dep == DependencyType.ACTIVE_INACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000016", "length_bytes": 2006, "license_type": "permissive", "methods": [{"docstring": "Arguments: dep_type: an instance of DependencyType Returns the symmetric dependency of dep_type", "name": "inverse", "signature": "def inverse(dep_type)"}, {"docstring": "Arguments: dep_type: an instance of DepType Returns: the antecedent node state of the dep_type", "name": "antecedent", "signature": "def antecedent(dep_type)"}, {"docstring": "Arguments: dep_type: an instance of DepType Returns: the consequent node state of the dep_type", "name": "consequent", "signature": "def consequent(dep)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_025485", "prompt": "Implement the Python class `DependencyType` described below.\n\nClass description:\nTypes of dependencies.\n\nMethod signatures and docstrings:\n- def inverse(dep_type): Arguments: dep_type: an instance of DependencyType Returns the symmetric dependency of dep_type\n- def antecedent(dep_type): Arguments: dep_type: an instance of DepType Returns: the antecedent node state of the dep_type\n- def consequent(dep): Arguments: dep_type: an instance of DepType Returns: the consequent node state of the dep_type", "prompted_full_text": "Implement the Python class `DependencyType` described below.\n\nClass description:\nTypes of dependencies.\n\nMethod signatures and docstrings:\n- def inverse(dep_type): Arguments: dep_type: an instance of DependencyType Returns the symmetric dependency of dep_type\n- def antecedent(dep_type): Arguments: dep_type: an instance of DepType Returns: the antecedent node state of the dep_type\n- def consequent(dep): Arguments: dep_type: an instance of DepType Returns: the consequent node state of the dep_type\n\n<|skeleton|>\nclass DependencyType:\n \"\"\"Types of dependencies.\"\"\"\n\n def inverse(dep_type):\n \"\"\"Arguments: dep_type: an instance of DependencyType Returns the symmetric dependency of dep_type\"\"\"\n <|body_0|>\n\n def antecedent(dep_type):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the antecedent node state of the dep_type\"\"\"\n <|body_1|>\n\n def consequent(dep):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the consequent node state of the dep_type\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if dep_type == DependencyType.INACTIVE_INACTIVE:\n return DependencyType.ACTIVE_ACTIVE\n elif dep_type == DependencyType.ACTIVE_ACTIVE:\n return DependencyType.INACTIVE_INACTIVE\n else:\n return dep_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dep_type == DependencyType.INACTIVE_INACTIVE or dep_type == DependencyType.INACTIVE_ACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n<|end_body_1|>\n\n<|body_start_2|>\n if dep == DependencyType.INACTIVE_INACTIVE or dep == DependencyType.ACTIVE_INACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n<|end_body_2|>\n", "revision_id": "57e9608041d230b5d78c4f2afb890b81035436a1", "skeleton": "<|skeleton|>\nclass DependencyType:\n \"\"\"Types of dependencies.\"\"\"\n\n def inverse(dep_type):\n \"\"\"Arguments: dep_type: an instance of DependencyType Returns the symmetric dependency of dep_type\"\"\"\n <|body_0|>\n\n def antecedent(dep_type):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the antecedent node state of the dep_type\"\"\"\n <|body_1|>\n\n def consequent(dep):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the consequent node state of the dep_type\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DependencyType:\n \"\"\"Types of dependencies.\"\"\"\n\n def inverse(dep_type):\n \"\"\"Arguments: dep_type: an instance of DependencyType Returns the symmetric dependency of dep_type\"\"\"\n if dep_type == DependencyType.INACTIVE_INACTIVE:\n return DependencyType.ACTIVE_ACTIVE\n elif dep_type == DependencyType.ACTIVE_ACTIVE:\n return DependencyType.INACTIVE_INACTIVE\n else:\n return dep_type\n\n def antecedent(dep_type):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the antecedent node state of the dep_type\"\"\"\n if dep_type == DependencyType.INACTIVE_INACTIVE or dep_type == DependencyType.INACTIVE_ACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n\n def consequent(dep):\n \"\"\"Arguments: dep_type: an instance of DepType Returns: the consequent node state of the dep_type\"\"\"\n if dep == DependencyType.INACTIVE_INACTIVE or dep == DependencyType.ACTIVE_INACTIVE:\n return ReluState.INACTIVE\n else:\n return ReluState.ACTIVE\n", "source": "the_stack_v2_python_sparse", "source_path": "src/utils/DependencyType.py", "source_repo": "pkouvaros/venus2_vnncomp21", "split": "test", "star_events_count": 0} {"blob_id": "f2d7c36130457070e88aabf3f813c36a3f7524c2", "bodies": ["self._logger = Logger(verbose)\nif maximum_iterations <= 0:\n self._logger.log('Maximum number of iterations must be greater than zero', error=True)\nself._maximum_iterations = maximum_iterations\nself._swarm = Swarm(swarm_size, Bounds(lower_bound, upper_bound, self._logger), PsoParameters(omega, phip, phig, self._logger), minimum_step, minimum_improvement, objective_function, self._logger)", "for _ in range(self._maximum_iterations):\n self._swarm.update()\n if not self._swarm.still_improving() or not self._swarm.still_moving():\n break\nreturn (self._swarm.best_position(), self._swarm.best_score())"], "bodies_text": "<|body_start_0|>\n self._logger = Logger(verbose)\n if maximum_iterations <= 0:\n self._logger.log('Maximum number of iterations must be greater than zero', error=True)\n self._maximum_iterations = maximum_iterations\n self._swarm = Swarm(swarm_size, Bounds(lower_bound, upper_bound, self._logger), PsoParameters(omega, phip, phig, self._logger), minimum_step, minimum_improvement, objective_function, self._logger)\n<|end_body_0|>\n\n<|body_start_1|>\n for _ in range(self._maximum_iterations):\n self._swarm.update()\n if not self._swarm.still_improving() or not self._swarm.still_moving():\n break\n return (self._swarm.best_position(), self._swarm.best_score())\n<|end_body_1|>\n", "class_docstring": "Pso encapsulates the creation and successive updates of a swarm of particles", "class_name": "Pso", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Pso:\n \"\"\"Pso encapsulates the creation and successive updates of a swarm of particles\"\"\"\n\n def __init__(self, objective_function: ObjectiveFunctionBase, lower_bound: np.ndarray, upper_bound: np.ndarray, swarm_size: int, omega: float=0.5, phip: float=0.5, phig: float=0.5, maximum_iterations: int=100, minimum_step: float=1e-07, minimum_improvement: float=1e-07, threads: int=1, verbose: bool=False):\n \"\"\"Constructor of a Particle Swarm Optimizer :param objective_function: Objective function reporting the score :param lower_bound: lower bound for parameters :param upper_bound: upper bound for parameters :param swarm_size: number of particles in the swarm :param omega: omega parameter for velocity updates :param phip: phip parameter for velocity updates :param phig: phig parameter for velocity updates :param maximum_iterations: maximum number of iterations for optimization :param minimum_step: minimum particle distance :param minimum_improvement: minimum allowed improvement :param threads: number of execution threads :param verbose: enable to receive information about the progress\"\"\"\n <|body_0|>\n\n def run(self) -> Tuple[np.ndarray, float]:\n \"\"\"Run particle swarm optimization :return: (tuple) best position, best score\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._logger = Logger(verbose)\n if maximum_iterations <= 0:\n self._logger.log('Maximum number of iterations must be greater than zero', error=True)\n self._maximum_iterations = maximum_iterations\n self._swarm = Swarm(swarm_size, Bounds(lower_bound, upper_bound, self._logger), PsoParameters(omega, phip, phig, self._logger), minimum_step, minimum_improvement, objective_function, self._logger)\n<|end_body_0|>\n\n<|body_start_1|>\n for _ in range(self._maximum_iterations):\n self._swarm.update()\n if not self._swarm.still_improving() or not self._swarm.still_moving():\n break\n return (self._swarm.best_position(), self._swarm.best_score())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000017", "length_bytes": 17034, "license_type": "permissive", "methods": [{"docstring": "Constructor of a Particle Swarm Optimizer :param objective_function: Objective function reporting the score :param lower_bound: lower bound for parameters :param upper_bound: upper bound for parameters :param swarm_size: number of particles in the swarm :param omega: omega parameter for velocity updates :param phip: phip parameter for velocity updates :param phig: phig parameter for velocity updates :param maximum_iterations: maximum number of iterations for optimization :param minimum_step: minimum particle distance :param minimum_improvement: minimum allowed improvement :param threads: number of execution threads :param verbose: enable to receive information about the progress", "name": "__init__", "signature": "def __init__(self, objective_function: ObjectiveFunctionBase, lower_bound: np.ndarray, upper_bound: np.ndarray, swarm_size: int, omega: float=0.5, phip: float=0.5, phig: float=0.5, maximum_iterations: int=100, minimum_step: float=1e-07, minimum_improvement: float=1e-07, threads: int=1, verbose: bool=False)"}, {"docstring": "Run particle swarm optimization :return: (tuple) best position, best score", "name": "run", "signature": "def run(self) -> Tuple[np.ndarray, float]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_054321", "prompt": "Implement the Python class `Pso` described below.\n\nClass description:\nPso encapsulates the creation and successive updates of a swarm of particles\n\nMethod signatures and docstrings:\n- def __init__(self, objective_function: ObjectiveFunctionBase, lower_bound: np.ndarray, upper_bound: np.ndarray, swarm_size: int, omega: float=0.5, phip: float=0.5, phig: float=0.5, maximum_iterations: int=100, minimum_step: float=1e-07, minimum_improvement: float=1e-07, threads: int=1, verbose: bool=False): Constructor of a Particle Swarm Optimizer :param objective_function: Objective function reporting the score :param lower_bound: lower bound for parameters :param upper_bound: upper bound for parameters :param swarm_size: number of particles in the swarm :param omega: omega parameter for velocity updates :param phip: phip parameter for velocity updates :param phig: phig parameter for velocity updates :param maximum_iterations: maximum number of iterations for optimization :param minimum_step: minimum particle distance :param minimum_improvement: minimum allowed improvement :param threads: number of execution threads :param verbose: enable to receive information about the progress\n- def run(self) -> Tuple[np.ndarray, float]: Run particle swarm optimization :return: (tuple) best position, best score", "prompted_full_text": "Implement the Python class `Pso` described below.\n\nClass description:\nPso encapsulates the creation and successive updates of a swarm of particles\n\nMethod signatures and docstrings:\n- def __init__(self, objective_function: ObjectiveFunctionBase, lower_bound: np.ndarray, upper_bound: np.ndarray, swarm_size: int, omega: float=0.5, phip: float=0.5, phig: float=0.5, maximum_iterations: int=100, minimum_step: float=1e-07, minimum_improvement: float=1e-07, threads: int=1, verbose: bool=False): Constructor of a Particle Swarm Optimizer :param objective_function: Objective function reporting the score :param lower_bound: lower bound for parameters :param upper_bound: upper bound for parameters :param swarm_size: number of particles in the swarm :param omega: omega parameter for velocity updates :param phip: phip parameter for velocity updates :param phig: phig parameter for velocity updates :param maximum_iterations: maximum number of iterations for optimization :param minimum_step: minimum particle distance :param minimum_improvement: minimum allowed improvement :param threads: number of execution threads :param verbose: enable to receive information about the progress\n- def run(self) -> Tuple[np.ndarray, float]: Run particle swarm optimization :return: (tuple) best position, best score\n\n<|skeleton|>\nclass Pso:\n \"\"\"Pso encapsulates the creation and successive updates of a swarm of particles\"\"\"\n\n def __init__(self, objective_function: ObjectiveFunctionBase, lower_bound: np.ndarray, upper_bound: np.ndarray, swarm_size: int, omega: float=0.5, phip: float=0.5, phig: float=0.5, maximum_iterations: int=100, minimum_step: float=1e-07, minimum_improvement: float=1e-07, threads: int=1, verbose: bool=False):\n \"\"\"Constructor of a Particle Swarm Optimizer :param objective_function: Objective function reporting the score :param lower_bound: lower bound for parameters :param upper_bound: upper bound for parameters :param swarm_size: number of particles in the swarm :param omega: omega parameter for velocity updates :param phip: phip parameter for velocity updates :param phig: phig parameter for velocity updates :param maximum_iterations: maximum number of iterations for optimization :param minimum_step: minimum particle distance :param minimum_improvement: minimum allowed improvement :param threads: number of execution threads :param verbose: enable to receive information about the progress\"\"\"\n <|body_0|>\n\n def run(self) -> Tuple[np.ndarray, float]:\n \"\"\"Run particle swarm optimization :return: (tuple) best position, best score\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._logger = Logger(verbose)\n if maximum_iterations <= 0:\n self._logger.log('Maximum number of iterations must be greater than zero', error=True)\n self._maximum_iterations = maximum_iterations\n self._swarm = Swarm(swarm_size, Bounds(lower_bound, upper_bound, self._logger), PsoParameters(omega, phip, phig, self._logger), minimum_step, minimum_improvement, objective_function, self._logger)\n<|end_body_0|>\n\n<|body_start_1|>\n for _ in range(self._maximum_iterations):\n self._swarm.update()\n if not self._swarm.still_improving() or not self._swarm.still_moving():\n break\n return (self._swarm.best_position(), self._swarm.best_score())\n<|end_body_1|>\n", "revision_id": "b33f7893d3dfcbbc2c10076fb61b2b1f1316402a", "skeleton": "<|skeleton|>\nclass Pso:\n \"\"\"Pso encapsulates the creation and successive updates of a swarm of particles\"\"\"\n\n def __init__(self, objective_function: ObjectiveFunctionBase, lower_bound: np.ndarray, upper_bound: np.ndarray, swarm_size: int, omega: float=0.5, phip: float=0.5, phig: float=0.5, maximum_iterations: int=100, minimum_step: float=1e-07, minimum_improvement: float=1e-07, threads: int=1, verbose: bool=False):\n \"\"\"Constructor of a Particle Swarm Optimizer :param objective_function: Objective function reporting the score :param lower_bound: lower bound for parameters :param upper_bound: upper bound for parameters :param swarm_size: number of particles in the swarm :param omega: omega parameter for velocity updates :param phip: phip parameter for velocity updates :param phig: phig parameter for velocity updates :param maximum_iterations: maximum number of iterations for optimization :param minimum_step: minimum particle distance :param minimum_improvement: minimum allowed improvement :param threads: number of execution threads :param verbose: enable to receive information about the progress\"\"\"\n <|body_0|>\n\n def run(self) -> Tuple[np.ndarray, float]:\n \"\"\"Run particle swarm optimization :return: (tuple) best position, best score\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Pso:\n \"\"\"Pso encapsulates the creation and successive updates of a swarm of particles\"\"\"\n\n def __init__(self, objective_function: ObjectiveFunctionBase, lower_bound: np.ndarray, upper_bound: np.ndarray, swarm_size: int, omega: float=0.5, phip: float=0.5, phig: float=0.5, maximum_iterations: int=100, minimum_step: float=1e-07, minimum_improvement: float=1e-07, threads: int=1, verbose: bool=False):\n \"\"\"Constructor of a Particle Swarm Optimizer :param objective_function: Objective function reporting the score :param lower_bound: lower bound for parameters :param upper_bound: upper bound for parameters :param swarm_size: number of particles in the swarm :param omega: omega parameter for velocity updates :param phip: phip parameter for velocity updates :param phig: phig parameter for velocity updates :param maximum_iterations: maximum number of iterations for optimization :param minimum_step: minimum particle distance :param minimum_improvement: minimum allowed improvement :param threads: number of execution threads :param verbose: enable to receive information about the progress\"\"\"\n self._logger = Logger(verbose)\n if maximum_iterations <= 0:\n self._logger.log('Maximum number of iterations must be greater than zero', error=True)\n self._maximum_iterations = maximum_iterations\n self._swarm = Swarm(swarm_size, Bounds(lower_bound, upper_bound, self._logger), PsoParameters(omega, phip, phig, self._logger), minimum_step, minimum_improvement, objective_function, self._logger)\n\n def run(self) -> Tuple[np.ndarray, float]:\n \"\"\"Run particle swarm optimization :return: (tuple) best position, best score\"\"\"\n for _ in range(self._maximum_iterations):\n self._swarm.update()\n if not self._swarm.still_improving() or not self._swarm.still_moving():\n break\n return (self._swarm.best_position(), self._swarm.best_score())\n", "source": "the_stack_v2_python_sparse", "source_path": "python_research/fastPSO/pso.py", "source_repo": "ESA-PhiLab/hypernet", "split": "test", "star_events_count": 44} {"blob_id": "c3c6c1f3ea5634d704e09f64c83ff1c7c8f5e5a1", "bodies": ["if not lang:\n lang = SettingsDAO().get_value('language', str)\nself.set_many(True)\nfor name, value in strValues.items():\n data = {'lang': lang, 'target_id': objectId, 'type': objectType.value, 'name': name, 'value': value}\n self.insert('translates', data)\nself.insert_many_execute()\nself.set_many(False)", "translates = self.select('translates', {'target_id': objectId, 'type': objectType.value, 'lang': lang})\nself.set_many(True)\nfor name, value in strValues.items():\n db_line = get_line(name, translates)\n if db_line is None:\n data_dict = {'name': name, 'value': value, 'lang': lang, 'target_id': objectId, 'type': objectType.value}\n self.insert('translates', data_dict)\n elif db_line['value'] != value:\n data_dict = {'value': value}\n self.update('translates', db_line['ID'], data_dict)\nself.insert_many_execute()\nself.set_many(False)"], "bodies_text": "<|body_start_0|>\n if not lang:\n lang = SettingsDAO().get_value('language', str)\n self.set_many(True)\n for name, value in strValues.items():\n data = {'lang': lang, 'target_id': objectId, 'type': objectType.value, 'name': name, 'value': value}\n self.insert('translates', data)\n self.insert_many_execute()\n self.set_many(False)\n<|end_body_0|>\n\n<|body_start_1|>\n translates = self.select('translates', {'target_id': objectId, 'type': objectType.value, 'lang': lang})\n self.set_many(True)\n for name, value in strValues.items():\n db_line = get_line(name, translates)\n if db_line is None:\n data_dict = {'name': name, 'value': value, 'lang': lang, 'target_id': objectId, 'type': objectType.value}\n self.insert('translates', data_dict)\n elif db_line['value'] != value:\n data_dict = {'value': value}\n self.update('translates', db_line['ID'], data_dict)\n self.insert_many_execute()\n self.set_many(False)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ObjectDatabase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ObjectDatabase:\n\n def insert_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Insert translate for object to database :param strValues: dictionary of names and values :param lang: lang of translate :param objectId: id ob object :param objectType: objectType of object\"\"\"\n <|body_0|>\n\n def update_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Update translates in database :param strValues: dictionary of names and values :param lang: lang of translates :param objectId: id of object :param objectType: Object type of object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not lang:\n lang = SettingsDAO().get_value('language', str)\n self.set_many(True)\n for name, value in strValues.items():\n data = {'lang': lang, 'target_id': objectId, 'type': objectType.value, 'name': name, 'value': value}\n self.insert('translates', data)\n self.insert_many_execute()\n self.set_many(False)\n<|end_body_0|>\n\n<|body_start_1|>\n translates = self.select('translates', {'target_id': objectId, 'type': objectType.value, 'lang': lang})\n self.set_many(True)\n for name, value in strValues.items():\n db_line = get_line(name, translates)\n if db_line is None:\n data_dict = {'name': name, 'value': value, 'lang': lang, 'target_id': objectId, 'type': objectType.value}\n self.insert('translates', data_dict)\n elif db_line['value'] != value:\n data_dict = {'value': value}\n self.update('translates', db_line['ID'], data_dict)\n self.insert_many_execute()\n self.set_many(False)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000018", "length_bytes": 2991, "license_type": "no_license", "methods": [{"docstring": "Insert translate for object to database :param strValues: dictionary of names and values :param lang: lang of translate :param objectId: id ob object :param objectType: objectType of object", "name": "insert_translate", "signature": "def insert_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None"}, {"docstring": "Update translates in database :param strValues: dictionary of names and values :param lang: lang of translates :param objectId: id of object :param objectType: Object type of object", "name": "update_translate", "signature": "def update_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001886", "prompt": "Implement the Python class `ObjectDatabase` described below.\n\nClass description:\nImplement the ObjectDatabase class.\n\nMethod signatures and docstrings:\n- def insert_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None: Insert translate for object to database :param strValues: dictionary of names and values :param lang: lang of translate :param objectId: id ob object :param objectType: objectType of object\n- def update_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None: Update translates in database :param strValues: dictionary of names and values :param lang: lang of translates :param objectId: id of object :param objectType: Object type of object", "prompted_full_text": "Implement the Python class `ObjectDatabase` described below.\n\nClass description:\nImplement the ObjectDatabase class.\n\nMethod signatures and docstrings:\n- def insert_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None: Insert translate for object to database :param strValues: dictionary of names and values :param lang: lang of translate :param objectId: id ob object :param objectType: objectType of object\n- def update_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None: Update translates in database :param strValues: dictionary of names and values :param lang: lang of translates :param objectId: id of object :param objectType: Object type of object\n\n<|skeleton|>\nclass ObjectDatabase:\n\n def insert_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Insert translate for object to database :param strValues: dictionary of names and values :param lang: lang of translate :param objectId: id ob object :param objectType: objectType of object\"\"\"\n <|body_0|>\n\n def update_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Update translates in database :param strValues: dictionary of names and values :param lang: lang of translates :param objectId: id of object :param objectType: Object type of object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not lang:\n lang = SettingsDAO().get_value('language', str)\n self.set_many(True)\n for name, value in strValues.items():\n data = {'lang': lang, 'target_id': objectId, 'type': objectType.value, 'name': name, 'value': value}\n self.insert('translates', data)\n self.insert_many_execute()\n self.set_many(False)\n<|end_body_0|>\n\n<|body_start_1|>\n translates = self.select('translates', {'target_id': objectId, 'type': objectType.value, 'lang': lang})\n self.set_many(True)\n for name, value in strValues.items():\n db_line = get_line(name, translates)\n if db_line is None:\n data_dict = {'name': name, 'value': value, 'lang': lang, 'target_id': objectId, 'type': objectType.value}\n self.insert('translates', data_dict)\n elif db_line['value'] != value:\n data_dict = {'value': value}\n self.update('translates', db_line['ID'], data_dict)\n self.insert_many_execute()\n self.set_many(False)\n<|end_body_1|>\n", "revision_id": "40b088e061042599cbb30373ac229d37dddc01e6", "skeleton": "<|skeleton|>\nclass ObjectDatabase:\n\n def insert_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Insert translate for object to database :param strValues: dictionary of names and values :param lang: lang of translate :param objectId: id ob object :param objectType: objectType of object\"\"\"\n <|body_0|>\n\n def update_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Update translates in database :param strValues: dictionary of names and values :param lang: lang of translates :param objectId: id of object :param objectType: Object type of object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ObjectDatabase:\n def insert_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Insert translate for object to database :param strValues: dictionary of names and values :param lang: lang of translate :param objectId: id ob object :param objectType: objectType of object\"\"\"\n if not lang:\n lang = SettingsDAO().get_value('language', str)\n self.set_many(True)\n for name, value in strValues.items():\n data = {'lang': lang, 'target_id': objectId, 'type': objectType.value, 'name': name, 'value': value}\n self.insert('translates', data)\n self.insert_many_execute()\n self.set_many(False)\n\n def update_translate(self, strValues: dict, lang: str, objectId: int, objectType: ObjectType) -> None:\n \"\"\"Update translates in database :param strValues: dictionary of names and values :param lang: lang of translates :param objectId: id of object :param objectType: Object type of object\"\"\"\n translates = self.select('translates', {'target_id': objectId, 'type': objectType.value, 'lang': lang})\n self.set_many(True)\n for name, value in strValues.items():\n db_line = get_line(name, translates)\n if db_line is None:\n data_dict = {'name': name, 'value': value, 'lang': lang, 'target_id': objectId, 'type': objectType.value}\n self.insert('translates', data_dict)\n elif db_line['value'] != value:\n data_dict = {'value': value}\n self.update('translates', db_line['ID'], data_dict)\n self.insert_many_execute()\n self.set_many(False)\n", "source": "the_stack_v2_python_sparse", "source_path": "Program/data/database/ObjectDatabase.py", "source_repo": "Wilson194/DeskChar", "split": "test", "star_events_count": 0} {"blob_id": "dac80dbed2c2661c42470f54a1080f20ac234a3a", "bodies": ["self.post_reqparser = reqparse.RequestParser()\nself.post_reqparser.add_argument('email', required=True, help='email field is required', location=['form', 'json'], store_missing=False)\nself.post_reqparser.add_argument('fullname', required=False, location=['form', 'json'], store_missing=False)\nself.post_reqparser.add_argument('password', required=False, location=['form', 'json'], store_missing=False)\nself.post_reqparser.add_argument('admin', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\nself.post_reqparser.add_argument('activated', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)", "if not get_jwt_claims()['admin']:\n abort(HTTPStatus.FORBIDDEN.value, error='administration privileges required')\nargs = self.post_reqparser.parse_args()\ncurrent_user = Users.find_by_email(args['email'])\nif current_user:\n if 'fullname' in args:\n current_user.fullname = args['fullname']\n if 'activated' in args:\n current_user.activated = bool(args['activated'])\n if 'admin' in args:\n current_user.admin = bool(args['admin'])\n if 'password' in args:\n current_user.password = Users.generate_hash(args['password'].encode('utf8')).decode('utf8')\n try:\n current_user.commit()\n except Exception as e:\n logging.error(e)\n return ({'message': 'failed saving details to database'}, 500)\n return ({'message': 'success'}, 200)\nelse:\n return ({'message': 'error: could not find user'}, 403)"], "bodies_text": "<|body_start_0|>\n self.post_reqparser = reqparse.RequestParser()\n self.post_reqparser.add_argument('email', required=True, help='email field is required', location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('fullname', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('password', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('admin', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('activated', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n<|end_body_0|>\n\n<|body_start_1|>\n if not get_jwt_claims()['admin']:\n abort(HTTPStatus.FORBIDDEN.value, error='administration privileges required')\n args = self.post_reqparser.parse_args()\n current_user = Users.find_by_email(args['email'])\n if current_user:\n if 'fullname' in args:\n current_user.fullname = args['fullname']\n if 'activated' in args:\n current_user.activated = bool(args['activated'])\n if 'admin' in args:\n current_user.admin = bool(args['admin'])\n if 'password' in args:\n current_user.password = Users.generate_hash(args['password'].encode('utf8')).decode('utf8')\n try:\n current_user.commit()\n except Exception as e:\n logging.error(e)\n return ({'message': 'failed saving details to database'}, 500)\n return ({'message': 'success'}, 200)\n else:\n return ({'message': 'error: could not find user'}, 403)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "EditUser", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EditUser:\n\n def __init__(self) -> None:\n \"\"\"Initialises the edit user end point\"\"\"\n <|body_0|>\n\n def post(self) -> (str, int):\n \"\"\"POST request that allows user to change their credentials in the Users table. A valid access JWT is required where the admin claim has to be True Parameters can be passed using a POST request that contains a JSON with the following fields: :required: :param email: users current email address :type email: str :optional: :param fullname: users to be fullname :param password: users to be password :param admin: users to be admin status :param activated: users to be activated status :type fullname: str :type password: str :type admin: bool :type activated: bool :return: A message indicating success or failure and the corresponding response code\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.post_reqparser = reqparse.RequestParser()\n self.post_reqparser.add_argument('email', required=True, help='email field is required', location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('fullname', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('password', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('admin', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('activated', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n<|end_body_0|>\n\n<|body_start_1|>\n if not get_jwt_claims()['admin']:\n abort(HTTPStatus.FORBIDDEN.value, error='administration privileges required')\n args = self.post_reqparser.parse_args()\n current_user = Users.find_by_email(args['email'])\n if current_user:\n if 'fullname' in args:\n current_user.fullname = args['fullname']\n if 'activated' in args:\n current_user.activated = bool(args['activated'])\n if 'admin' in args:\n current_user.admin = bool(args['admin'])\n if 'password' in args:\n current_user.password = Users.generate_hash(args['password'].encode('utf8')).decode('utf8')\n try:\n current_user.commit()\n except Exception as e:\n logging.error(e)\n return ({'message': 'failed saving details to database'}, 500)\n return ({'message': 'success'}, 200)\n else:\n return ({'message': 'error: could not find user'}, 403)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000019", "length_bytes": 3274, "license_type": "permissive", "methods": [{"docstring": "Initialises the edit user end point", "name": "__init__", "signature": "def __init__(self) -> None"}, {"docstring": "POST request that allows user to change their credentials in the Users table. A valid access JWT is required where the admin claim has to be True Parameters can be passed using a POST request that contains a JSON with the following fields: :required: :param email: users current email address :type email: str :optional: :param fullname: users to be fullname :param password: users to be password :param admin: users to be admin status :param activated: users to be activated status :type fullname: str :type password: str :type admin: bool :type activated: bool :return: A message indicating success or failure and the corresponding response code", "name": "post", "signature": "def post(self) -> (str, int)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009545", "prompt": "Implement the Python class `EditUser` described below.\n\nClass description:\nImplement the EditUser class.\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Initialises the edit user end point\n- def post(self) -> (str, int): POST request that allows user to change their credentials in the Users table. A valid access JWT is required where the admin claim has to be True Parameters can be passed using a POST request that contains a JSON with the following fields: :required: :param email: users current email address :type email: str :optional: :param fullname: users to be fullname :param password: users to be password :param admin: users to be admin status :param activated: users to be activated status :type fullname: str :type password: str :type admin: bool :type activated: bool :return: A message indicating success or failure and the corresponding response code", "prompted_full_text": "Implement the Python class `EditUser` described below.\n\nClass description:\nImplement the EditUser class.\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Initialises the edit user end point\n- def post(self) -> (str, int): POST request that allows user to change their credentials in the Users table. A valid access JWT is required where the admin claim has to be True Parameters can be passed using a POST request that contains a JSON with the following fields: :required: :param email: users current email address :type email: str :optional: :param fullname: users to be fullname :param password: users to be password :param admin: users to be admin status :param activated: users to be activated status :type fullname: str :type password: str :type admin: bool :type activated: bool :return: A message indicating success or failure and the corresponding response code\n\n<|skeleton|>\nclass EditUser:\n\n def __init__(self) -> None:\n \"\"\"Initialises the edit user end point\"\"\"\n <|body_0|>\n\n def post(self) -> (str, int):\n \"\"\"POST request that allows user to change their credentials in the Users table. A valid access JWT is required where the admin claim has to be True Parameters can be passed using a POST request that contains a JSON with the following fields: :required: :param email: users current email address :type email: str :optional: :param fullname: users to be fullname :param password: users to be password :param admin: users to be admin status :param activated: users to be activated status :type fullname: str :type password: str :type admin: bool :type activated: bool :return: A message indicating success or failure and the corresponding response code\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.post_reqparser = reqparse.RequestParser()\n self.post_reqparser.add_argument('email', required=True, help='email field is required', location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('fullname', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('password', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('admin', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('activated', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n<|end_body_0|>\n\n<|body_start_1|>\n if not get_jwt_claims()['admin']:\n abort(HTTPStatus.FORBIDDEN.value, error='administration privileges required')\n args = self.post_reqparser.parse_args()\n current_user = Users.find_by_email(args['email'])\n if current_user:\n if 'fullname' in args:\n current_user.fullname = args['fullname']\n if 'activated' in args:\n current_user.activated = bool(args['activated'])\n if 'admin' in args:\n current_user.admin = bool(args['admin'])\n if 'password' in args:\n current_user.password = Users.generate_hash(args['password'].encode('utf8')).decode('utf8')\n try:\n current_user.commit()\n except Exception as e:\n logging.error(e)\n return ({'message': 'failed saving details to database'}, 500)\n return ({'message': 'success'}, 200)\n else:\n return ({'message': 'error: could not find user'}, 403)\n<|end_body_1|>\n", "revision_id": "5d123691d1f25d0b85e20e4e8293266bf23c9f8a", "skeleton": "<|skeleton|>\nclass EditUser:\n\n def __init__(self) -> None:\n \"\"\"Initialises the edit user end point\"\"\"\n <|body_0|>\n\n def post(self) -> (str, int):\n \"\"\"POST request that allows user to change their credentials in the Users table. A valid access JWT is required where the admin claim has to be True Parameters can be passed using a POST request that contains a JSON with the following fields: :required: :param email: users current email address :type email: str :optional: :param fullname: users to be fullname :param password: users to be password :param admin: users to be admin status :param activated: users to be activated status :type fullname: str :type password: str :type admin: bool :type activated: bool :return: A message indicating success or failure and the corresponding response code\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EditUser:\n def __init__(self) -> None:\n \"\"\"Initialises the edit user end point\"\"\"\n self.post_reqparser = reqparse.RequestParser()\n self.post_reqparser.add_argument('email', required=True, help='email field is required', location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('fullname', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('password', required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('admin', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n self.post_reqparser.add_argument('activated', type=inputs.boolean, required=False, location=['form', 'json'], store_missing=False)\n\n def post(self) -> (str, int):\n \"\"\"POST request that allows user to change their credentials in the Users table. A valid access JWT is required where the admin claim has to be True Parameters can be passed using a POST request that contains a JSON with the following fields: :required: :param email: users current email address :type email: str :optional: :param fullname: users to be fullname :param password: users to be password :param admin: users to be admin status :param activated: users to be activated status :type fullname: str :type password: str :type admin: bool :type activated: bool :return: A message indicating success or failure and the corresponding response code\"\"\"\n if not get_jwt_claims()['admin']:\n abort(HTTPStatus.FORBIDDEN.value, error='administration privileges required')\n args = self.post_reqparser.parse_args()\n current_user = Users.find_by_email(args['email'])\n if current_user:\n if 'fullname' in args:\n current_user.fullname = args['fullname']\n if 'activated' in args:\n current_user.activated = bool(args['activated'])\n if 'admin' in args:\n current_user.admin = bool(args['admin'])\n if 'password' in args:\n current_user.password = Users.generate_hash(args['password'].encode('utf8')).decode('utf8')\n try:\n current_user.commit()\n except Exception as e:\n logging.error(e)\n return ({'message': 'failed saving details to database'}, 500)\n return ({'message': 'success'}, 200)\n else:\n return ({'message': 'error: could not find user'}, 403)\n", "source": "the_stack_v2_python_sparse", "source_path": "Analytics/resources/admin/edit_user.py", "source_repo": "thanosbnt/SharingCitiesDashboard", "split": "test", "star_events_count": 0} {"blob_id": "20a0f4822cc6715d50c3af102ca1bb3dea29de60", "bodies": ["self.nums = nums\nself.rnd = random\nself.count = 0", "self.rnd.seed()\nself.count = 0\nridx = -1\nfor i in range(len(self.nums)):\n if self.nums[i] != target:\n continue\n self.count += 1\n if self.rnd.randint(1, self.count) == self.count:\n ridx = i\nreturn ridx"], "bodies_text": "<|body_start_0|>\n self.nums = nums\n self.rnd = random\n self.count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n self.rnd.seed()\n self.count = 0\n ridx = -1\n for i in range(len(self.nums)):\n if self.nums[i] != target:\n continue\n self.count += 1\n if self.rnd.randint(1, self.count) == self.count:\n ridx = i\n return ridx\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n <|body_0|>\n\n def pick(self, target):\n \"\"\":type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.nums = nums\n self.rnd = random\n self.count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n self.rnd.seed()\n self.count = 0\n ridx = -1\n for i in range(len(self.nums)):\n if self.nums[i] != target:\n continue\n self.count += 1\n if self.rnd.randint(1, self.count) == self.count:\n ridx = i\n return ridx\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000020", "length_bytes": 748, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int]", "name": "__init__", "signature": "def __init__(self, nums)"}, {"docstring": ":type target: int :rtype: int", "name": "pick", "signature": "def pick(self, target)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums): :type nums: List[int]\n- def pick(self, target): :type target: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, nums): :type nums: List[int]\n- def pick(self, target): :type target: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n <|body_0|>\n\n def pick(self, target):\n \"\"\":type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.nums = nums\n self.rnd = random\n self.count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n self.rnd.seed()\n self.count = 0\n ridx = -1\n for i in range(len(self.nums)):\n if self.nums[i] != target:\n continue\n self.count += 1\n if self.rnd.randint(1, self.count) == self.count:\n ridx = i\n return ridx\n<|end_body_1|>\n", "revision_id": "4d340a45fb2e9459d47cbe179ebfa7a82e5f1b8c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n <|body_0|>\n\n def pick(self, target):\n \"\"\":type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def __init__(self, nums):\n \"\"\":type nums: List[int]\"\"\"\n self.nums = nums\n self.rnd = random\n self.count = 0\n\n def pick(self, target):\n \"\"\":type target: int :rtype: int\"\"\"\n self.rnd.seed()\n self.count = 0\n ridx = -1\n for i in range(len(self.nums)):\n if self.nums[i] != target:\n continue\n self.count += 1\n if self.rnd.randint(1, self.count) == self.count:\n ridx = i\n return ridx\n", "source": "the_stack_v2_python_sparse", "source_path": "398_RandomPickIndex/solution1.py", "source_repo": "llgeek/leetcode", "split": "test", "star_events_count": 1} {"blob_id": "a8edee3e2f6ead518f7ea331bfbecf00d6c6f145", "bodies": ["self.tokenizer = Tokenizer(oov_token='oovtok', lower=False)\nself.enc2targs = {}\nself.targ2int = {}\nself.train_dir = train_dir\nself.min_examples_per_targ = min_examples_per_targ\nself.n_x_cuis = None if n_x_cuis == 'all' else int(n_x_cuis)\nself.n_y_cuis = None if n_y_cuis == 'all' else int(n_y_cuis)\nself.index()\nif os.path.isdir(model_dir):\n shutil.rmtree(model_dir)\nos.mkdir(model_dir)", "targ_counter = collections.Counter()\nfor disch_file in glob.glob(self.train_dir + '*_discharge.txt'):\n targs = set(read_tokens(disch_file, self.n_y_cuis, None))\n enc_id = disch_file.split('/')[-1].split('_')[0]\n self.enc2targs[enc_id] = targs\n targ_counter.update(targs)\ntotal = 0\nindex = 0\nfor targ, count in targ_counter.items():\n if count > self.min_examples_per_targ:\n total = total + count\n self.targ2int[targ] = index\n index = index + 1\naverage_examples = round(float(total) / len(self.targ2int), 2)\nprint('average examples per target:', average_examples)", "x = []\ny = []\nfor disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n tokens = read_tokens(rest_path, self.n_x_cuis, None)\n x.append(' '.join(tokens))\n targ_vec = numpy.zeros(len(self.targ2int))\n enc_id = disch_path.split('/')[-1].split('_')[0]\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n targ_vec[self.targ2int[targ]] = 1\n y.append(targ_vec)\nself.tokenizer.fit_on_texts(x)\npickle_file = open('Model/tokenizer.p', 'wb')\npickle.dump(self.tokenizer, pickle_file)\nprint('input vocabulary size:', len(self.tokenizer.word_index))\nx = self.tokenizer.texts_to_sequences(x)\nmax_seq_len = max((len(seq) for seq in x))\nx = pad_sequences(x, maxlen=max_seq_len)\nreturn (x, numpy.array(y))", "all_x_sizes = []\nall_y_sizes = []\nfor disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n enc_id = disch_path.split('/')[-1].split('_')[0]\n x_tokens = set(read_tokens(rest_path, self.n_x_cuis, None))\n y_tokens = set()\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n y_tokens.add(targ)\n all_x_sizes.append(len(x_tokens))\n all_y_sizes.append(len(y_tokens))\nprint('aver x size:', round(sum(all_x_sizes) / len(all_x_sizes), 2))\nprint('aver y size:', round(sum(all_y_sizes) / len(all_y_sizes), 2))\nprint('unique targets:', len(self.targ2int))"], "bodies_text": "<|body_start_0|>\n self.tokenizer = Tokenizer(oov_token='oovtok', lower=False)\n self.enc2targs = {}\n self.targ2int = {}\n self.train_dir = train_dir\n self.min_examples_per_targ = min_examples_per_targ\n self.n_x_cuis = None if n_x_cuis == 'all' else int(n_x_cuis)\n self.n_y_cuis = None if n_y_cuis == 'all' else int(n_y_cuis)\n self.index()\n if os.path.isdir(model_dir):\n shutil.rmtree(model_dir)\n os.mkdir(model_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n targ_counter = collections.Counter()\n for disch_file in glob.glob(self.train_dir + '*_discharge.txt'):\n targs = set(read_tokens(disch_file, self.n_y_cuis, None))\n enc_id = disch_file.split('/')[-1].split('_')[0]\n self.enc2targs[enc_id] = targs\n targ_counter.update(targs)\n total = 0\n index = 0\n for targ, count in targ_counter.items():\n if count > self.min_examples_per_targ:\n total = total + count\n self.targ2int[targ] = index\n index = index + 1\n average_examples = round(float(total) / len(self.targ2int), 2)\n print('average examples per target:', average_examples)\n<|end_body_1|>\n\n<|body_start_2|>\n x = []\n y = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n tokens = read_tokens(rest_path, self.n_x_cuis, None)\n x.append(' '.join(tokens))\n targ_vec = numpy.zeros(len(self.targ2int))\n enc_id = disch_path.split('/')[-1].split('_')[0]\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n targ_vec[self.targ2int[targ]] = 1\n y.append(targ_vec)\n self.tokenizer.fit_on_texts(x)\n pickle_file = open('Model/tokenizer.p', 'wb')\n pickle.dump(self.tokenizer, pickle_file)\n print('input vocabulary size:', len(self.tokenizer.word_index))\n x = self.tokenizer.texts_to_sequences(x)\n max_seq_len = max((len(seq) for seq in x))\n x = pad_sequences(x, maxlen=max_seq_len)\n return (x, numpy.array(y))\n<|end_body_2|>\n\n<|body_start_3|>\n all_x_sizes = []\n all_y_sizes = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n enc_id = disch_path.split('/')[-1].split('_')[0]\n x_tokens = set(read_tokens(rest_path, self.n_x_cuis, None))\n y_tokens = set()\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n y_tokens.add(targ)\n all_x_sizes.append(len(x_tokens))\n all_y_sizes.append(len(y_tokens))\n print('aver x size:', round(sum(all_x_sizes) / len(all_x_sizes), 2))\n print('aver y size:', round(sum(all_y_sizes) / len(all_y_sizes), 2))\n print('unique targets:', len(self.targ2int))\n<|end_body_3|>\n", "class_docstring": "Make x and y from raw data", "class_name": "DatasetProvider", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DatasetProvider:\n \"\"\"Make x and y from raw data\"\"\"\n\n def __init__(self, train_dir, model_dir, n_x_cuis, n_y_cuis, min_examples_per_targ):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def index(self):\n \"\"\"Process discharge summaries (prediction targets)\"\"\"\n <|body_1|>\n\n def load(self):\n \"\"\"Process notes to make x and y\"\"\"\n <|body_2|>\n\n def stats(self):\n \"\"\"Print various data statistics\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.tokenizer = Tokenizer(oov_token='oovtok', lower=False)\n self.enc2targs = {}\n self.targ2int = {}\n self.train_dir = train_dir\n self.min_examples_per_targ = min_examples_per_targ\n self.n_x_cuis = None if n_x_cuis == 'all' else int(n_x_cuis)\n self.n_y_cuis = None if n_y_cuis == 'all' else int(n_y_cuis)\n self.index()\n if os.path.isdir(model_dir):\n shutil.rmtree(model_dir)\n os.mkdir(model_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n targ_counter = collections.Counter()\n for disch_file in glob.glob(self.train_dir + '*_discharge.txt'):\n targs = set(read_tokens(disch_file, self.n_y_cuis, None))\n enc_id = disch_file.split('/')[-1].split('_')[0]\n self.enc2targs[enc_id] = targs\n targ_counter.update(targs)\n total = 0\n index = 0\n for targ, count in targ_counter.items():\n if count > self.min_examples_per_targ:\n total = total + count\n self.targ2int[targ] = index\n index = index + 1\n average_examples = round(float(total) / len(self.targ2int), 2)\n print('average examples per target:', average_examples)\n<|end_body_1|>\n\n<|body_start_2|>\n x = []\n y = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n tokens = read_tokens(rest_path, self.n_x_cuis, None)\n x.append(' '.join(tokens))\n targ_vec = numpy.zeros(len(self.targ2int))\n enc_id = disch_path.split('/')[-1].split('_')[0]\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n targ_vec[self.targ2int[targ]] = 1\n y.append(targ_vec)\n self.tokenizer.fit_on_texts(x)\n pickle_file = open('Model/tokenizer.p', 'wb')\n pickle.dump(self.tokenizer, pickle_file)\n print('input vocabulary size:', len(self.tokenizer.word_index))\n x = self.tokenizer.texts_to_sequences(x)\n max_seq_len = max((len(seq) for seq in x))\n x = pad_sequences(x, maxlen=max_seq_len)\n return (x, numpy.array(y))\n<|end_body_2|>\n\n<|body_start_3|>\n all_x_sizes = []\n all_y_sizes = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n enc_id = disch_path.split('/')[-1].split('_')[0]\n x_tokens = set(read_tokens(rest_path, self.n_x_cuis, None))\n y_tokens = set()\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n y_tokens.add(targ)\n all_x_sizes.append(len(x_tokens))\n all_y_sizes.append(len(y_tokens))\n print('aver x size:', round(sum(all_x_sizes) / len(all_x_sizes), 2))\n print('aver y size:', round(sum(all_y_sizes) / len(all_y_sizes), 2))\n print('unique targets:', len(self.targ2int))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000021", "length_bytes": 4361, "license_type": "no_license", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, train_dir, model_dir, n_x_cuis, n_y_cuis, min_examples_per_targ)"}, {"docstring": "Process discharge summaries (prediction targets)", "name": "index", "signature": "def index(self)"}, {"docstring": "Process notes to make x and y", "name": "load", "signature": "def load(self)"}, {"docstring": "Print various data statistics", "name": "stats", "signature": "def stats(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_032092", "prompt": "Implement the Python class `DatasetProvider` described below.\n\nClass description:\nMake x and y from raw data\n\nMethod signatures and docstrings:\n- def __init__(self, train_dir, model_dir, n_x_cuis, n_y_cuis, min_examples_per_targ): Constructor\n- def index(self): Process discharge summaries (prediction targets)\n- def load(self): Process notes to make x and y\n- def stats(self): Print various data statistics", "prompted_full_text": "Implement the Python class `DatasetProvider` described below.\n\nClass description:\nMake x and y from raw data\n\nMethod signatures and docstrings:\n- def __init__(self, train_dir, model_dir, n_x_cuis, n_y_cuis, min_examples_per_targ): Constructor\n- def index(self): Process discharge summaries (prediction targets)\n- def load(self): Process notes to make x and y\n- def stats(self): Print various data statistics\n\n<|skeleton|>\nclass DatasetProvider:\n \"\"\"Make x and y from raw data\"\"\"\n\n def __init__(self, train_dir, model_dir, n_x_cuis, n_y_cuis, min_examples_per_targ):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def index(self):\n \"\"\"Process discharge summaries (prediction targets)\"\"\"\n <|body_1|>\n\n def load(self):\n \"\"\"Process notes to make x and y\"\"\"\n <|body_2|>\n\n def stats(self):\n \"\"\"Print various data statistics\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.tokenizer = Tokenizer(oov_token='oovtok', lower=False)\n self.enc2targs = {}\n self.targ2int = {}\n self.train_dir = train_dir\n self.min_examples_per_targ = min_examples_per_targ\n self.n_x_cuis = None if n_x_cuis == 'all' else int(n_x_cuis)\n self.n_y_cuis = None if n_y_cuis == 'all' else int(n_y_cuis)\n self.index()\n if os.path.isdir(model_dir):\n shutil.rmtree(model_dir)\n os.mkdir(model_dir)\n<|end_body_0|>\n\n<|body_start_1|>\n targ_counter = collections.Counter()\n for disch_file in glob.glob(self.train_dir + '*_discharge.txt'):\n targs = set(read_tokens(disch_file, self.n_y_cuis, None))\n enc_id = disch_file.split('/')[-1].split('_')[0]\n self.enc2targs[enc_id] = targs\n targ_counter.update(targs)\n total = 0\n index = 0\n for targ, count in targ_counter.items():\n if count > self.min_examples_per_targ:\n total = total + count\n self.targ2int[targ] = index\n index = index + 1\n average_examples = round(float(total) / len(self.targ2int), 2)\n print('average examples per target:', average_examples)\n<|end_body_1|>\n\n<|body_start_2|>\n x = []\n y = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n tokens = read_tokens(rest_path, self.n_x_cuis, None)\n x.append(' '.join(tokens))\n targ_vec = numpy.zeros(len(self.targ2int))\n enc_id = disch_path.split('/')[-1].split('_')[0]\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n targ_vec[self.targ2int[targ]] = 1\n y.append(targ_vec)\n self.tokenizer.fit_on_texts(x)\n pickle_file = open('Model/tokenizer.p', 'wb')\n pickle.dump(self.tokenizer, pickle_file)\n print('input vocabulary size:', len(self.tokenizer.word_index))\n x = self.tokenizer.texts_to_sequences(x)\n max_seq_len = max((len(seq) for seq in x))\n x = pad_sequences(x, maxlen=max_seq_len)\n return (x, numpy.array(y))\n<|end_body_2|>\n\n<|body_start_3|>\n all_x_sizes = []\n all_y_sizes = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n enc_id = disch_path.split('/')[-1].split('_')[0]\n x_tokens = set(read_tokens(rest_path, self.n_x_cuis, None))\n y_tokens = set()\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n y_tokens.add(targ)\n all_x_sizes.append(len(x_tokens))\n all_y_sizes.append(len(y_tokens))\n print('aver x size:', round(sum(all_x_sizes) / len(all_x_sizes), 2))\n print('aver y size:', round(sum(all_y_sizes) / len(all_y_sizes), 2))\n print('unique targets:', len(self.targ2int))\n<|end_body_3|>\n", "revision_id": "4fcb7aa9c5f7ed41277f6b369aff3b36ad47a118", "skeleton": "<|skeleton|>\nclass DatasetProvider:\n \"\"\"Make x and y from raw data\"\"\"\n\n def __init__(self, train_dir, model_dir, n_x_cuis, n_y_cuis, min_examples_per_targ):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def index(self):\n \"\"\"Process discharge summaries (prediction targets)\"\"\"\n <|body_1|>\n\n def load(self):\n \"\"\"Process notes to make x and y\"\"\"\n <|body_2|>\n\n def stats(self):\n \"\"\"Print various data statistics\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DatasetProvider:\n \"\"\"Make x and y from raw data\"\"\"\n\n def __init__(self, train_dir, model_dir, n_x_cuis, n_y_cuis, min_examples_per_targ):\n \"\"\"Constructor\"\"\"\n self.tokenizer = Tokenizer(oov_token='oovtok', lower=False)\n self.enc2targs = {}\n self.targ2int = {}\n self.train_dir = train_dir\n self.min_examples_per_targ = min_examples_per_targ\n self.n_x_cuis = None if n_x_cuis == 'all' else int(n_x_cuis)\n self.n_y_cuis = None if n_y_cuis == 'all' else int(n_y_cuis)\n self.index()\n if os.path.isdir(model_dir):\n shutil.rmtree(model_dir)\n os.mkdir(model_dir)\n\n def index(self):\n \"\"\"Process discharge summaries (prediction targets)\"\"\"\n targ_counter = collections.Counter()\n for disch_file in glob.glob(self.train_dir + '*_discharge.txt'):\n targs = set(read_tokens(disch_file, self.n_y_cuis, None))\n enc_id = disch_file.split('/')[-1].split('_')[0]\n self.enc2targs[enc_id] = targs\n targ_counter.update(targs)\n total = 0\n index = 0\n for targ, count in targ_counter.items():\n if count > self.min_examples_per_targ:\n total = total + count\n self.targ2int[targ] = index\n index = index + 1\n average_examples = round(float(total) / len(self.targ2int), 2)\n print('average examples per target:', average_examples)\n\n def load(self):\n \"\"\"Process notes to make x and y\"\"\"\n x = []\n y = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n tokens = read_tokens(rest_path, self.n_x_cuis, None)\n x.append(' '.join(tokens))\n targ_vec = numpy.zeros(len(self.targ2int))\n enc_id = disch_path.split('/')[-1].split('_')[0]\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n targ_vec[self.targ2int[targ]] = 1\n y.append(targ_vec)\n self.tokenizer.fit_on_texts(x)\n pickle_file = open('Model/tokenizer.p', 'wb')\n pickle.dump(self.tokenizer, pickle_file)\n print('input vocabulary size:', len(self.tokenizer.word_index))\n x = self.tokenizer.texts_to_sequences(x)\n max_seq_len = max((len(seq) for seq in x))\n x = pad_sequences(x, maxlen=max_seq_len)\n return (x, numpy.array(y))\n\n def stats(self):\n \"\"\"Print various data statistics\"\"\"\n all_x_sizes = []\n all_y_sizes = []\n for disch_path in glob.glob(self.train_dir + '*_discharge.txt'):\n rest_path = disch_path.split('_')[0] + '_rest.txt'\n if not os.path.exists(rest_path):\n continue\n enc_id = disch_path.split('/')[-1].split('_')[0]\n x_tokens = set(read_tokens(rest_path, self.n_x_cuis, None))\n y_tokens = set()\n for targ in self.enc2targs[enc_id]:\n if targ in self.targ2int:\n y_tokens.add(targ)\n all_x_sizes.append(len(x_tokens))\n all_y_sizes.append(len(y_tokens))\n print('aver x size:', round(sum(all_x_sizes) / len(all_x_sizes), 2))\n print('aver y size:', round(sum(all_y_sizes) / len(all_y_sizes), 2))\n print('unique targets:', len(self.targ2int))\n", "source": "the_stack_v2_python_sparse", "source_path": "Archive/MultLabel/dataset.py", "source_repo": "dmitriydligach/Universal", "split": "test", "star_events_count": 1} {"blob_id": "d1732df6973e57ea107066174899d57752e42292", "bodies": ["self.data = ''\n\ndef dfs(node):\n if node:\n self.data += '%s ' % node.val\n dfs(node.left)\n dfs(node.right)\n else:\n self.data += '# '\ndfs(root)\nreturn self.data", "def dfs():\n val = next(data)\n if val == '#':\n return None\n node = TreeNode(int(val))\n node.left = dfs()\n node.right = dfs()\n return node\ndata = iter(data.split())\nreturn dfs()"], "bodies_text": "<|body_start_0|>\n self.data = ''\n\n def dfs(node):\n if node:\n self.data += '%s ' % node.val\n dfs(node.left)\n dfs(node.right)\n else:\n self.data += '# '\n dfs(root)\n return self.data\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs():\n val = next(data)\n if val == '#':\n return None\n node = TreeNode(int(val))\n node.left = dfs()\n node.right = dfs()\n return node\n data = iter(data.split())\n return dfs()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = ''\n\n def dfs(node):\n if node:\n self.data += '%s ' % node.val\n dfs(node.left)\n dfs(node.right)\n else:\n self.data += '# '\n dfs(root)\n return self.data\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs():\n val = next(data)\n if val == '#':\n return None\n node = TreeNode(int(val))\n node.left = dfs()\n node.right = dfs()\n return node\n data = iter(data.split())\n return dfs()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000022", "length_bytes": 1568, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006057", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data = ''\n\n def dfs(node):\n if node:\n self.data += '%s ' % node.val\n dfs(node.left)\n dfs(node.right)\n else:\n self.data += '# '\n dfs(root)\n return self.data\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs():\n val = next(data)\n if val == '#':\n return None\n node = TreeNode(int(val))\n node.left = dfs()\n node.right = dfs()\n return node\n data = iter(data.split())\n return dfs()\n<|end_body_1|>\n", "revision_id": "2a03499ed0b403d79f6c8451c9a839991b23e188", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n self.data = ''\n\n def dfs(node):\n if node:\n self.data += '%s ' % node.val\n dfs(node.left)\n dfs(node.right)\n else:\n self.data += '# '\n dfs(root)\n return self.data\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n def dfs():\n val = next(data)\n if val == '#':\n return None\n node = TreeNode(int(val))\n node.left = dfs()\n node.right = dfs()\n return node\n data = iter(data.split())\n return dfs()\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/297_serialize_deserialize_binary_tree.py", "source_repo": "leetcode-notes/daily-algorithms-practice", "split": "test", "star_events_count": 0} {"blob_id": "07fd42ca106064ccdc698c729db604b37febb5fa", "bodies": ["rate = Rate.objects.filter(currency=currency, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\nif not rate:\n return None\nresult = {'usd': rate.usd, 'btc': rate.btc}\nreturn result", "rate = Rate.objects.filter(currency__code=currency_code, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\nif not rate:\n return None\nresult = {'usd': rate.usd, 'btc': rate.btc}\nreturn result"], "bodies_text": "<|body_start_0|>\n rate = Rate.objects.filter(currency=currency, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n rate = Rate.objects.filter(currency__code=currency_code, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Rate", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Rate:\n\n def get_actual_rates(currency):\n \"\"\"Get closest currency rate for date\"\"\"\n <|body_0|>\n\n def get_rate_by_code(currency_code):\n \"\"\"Get closest currency rate for date\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rate = Rate.objects.filter(currency=currency, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n rate = Rate.objects.filter(currency__code=currency_code, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000023", "length_bytes": 4281, "license_type": "no_license", "methods": [{"docstring": "Get closest currency rate for date", "name": "get_actual_rates", "signature": "def get_actual_rates(currency)"}, {"docstring": "Get closest currency rate for date", "name": "get_rate_by_code", "signature": "def get_rate_by_code(currency_code)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003201", "prompt": "Implement the Python class `Rate` described below.\n\nClass description:\nImplement the Rate class.\n\nMethod signatures and docstrings:\n- def get_actual_rates(currency): Get closest currency rate for date\n- def get_rate_by_code(currency_code): Get closest currency rate for date", "prompted_full_text": "Implement the Python class `Rate` described below.\n\nClass description:\nImplement the Rate class.\n\nMethod signatures and docstrings:\n- def get_actual_rates(currency): Get closest currency rate for date\n- def get_rate_by_code(currency_code): Get closest currency rate for date\n\n<|skeleton|>\nclass Rate:\n\n def get_actual_rates(currency):\n \"\"\"Get closest currency rate for date\"\"\"\n <|body_0|>\n\n def get_rate_by_code(currency_code):\n \"\"\"Get closest currency rate for date\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rate = Rate.objects.filter(currency=currency, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n rate = Rate.objects.filter(currency__code=currency_code, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n<|end_body_1|>\n", "revision_id": "601b4ccf17475e349b8d4cc1834e2c383554d165", "skeleton": "<|skeleton|>\nclass Rate:\n\n def get_actual_rates(currency):\n \"\"\"Get closest currency rate for date\"\"\"\n <|body_0|>\n\n def get_rate_by_code(currency_code):\n \"\"\"Get closest currency rate for date\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Rate:\n def get_actual_rates(currency):\n \"\"\"Get closest currency rate for date\"\"\"\n rate = Rate.objects.filter(currency=currency, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n\n def get_rate_by_code(currency_code):\n \"\"\"Get closest currency rate for date\"\"\"\n rate = Rate.objects.filter(currency__code=currency_code, usd__isnull=False, btc__isnull=False).order_by('-datetime').first()\n if not rate:\n return None\n result = {'usd': rate.usd, 'btc': rate.btc}\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "src/wallet/models/currency.py", "source_repo": "paradigm-citadel/Citadel-Backend", "split": "test", "star_events_count": 0} {"blob_id": "55596d8d7b478dbec42c4046bcb78c8e6e34a5d8", "bodies": ["if self._started:\n raise RuntimeError('The profiler was already started. It cannot be done again.')\nself._frames[0] = inspect.currentframe()\nself._started = True\nself._copy_cache = False\nself._buffer.log_event(-1, -1, 100, 0, 0)", "if not self._started:\n raise RuntimeError('The profiler was not started. It must be done first.')\nself._buffer.log_event(-1, -1, 101, 0, 0)\nself._started = False"], "bodies_text": "<|body_start_0|>\n if self._started:\n raise RuntimeError('The profiler was already started. It cannot be done again.')\n self._frames[0] = inspect.currentframe()\n self._started = True\n self._copy_cache = False\n self._buffer.log_event(-1, -1, 100, 0, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._started:\n raise RuntimeError('The profiler was not started. It must be done first.')\n self._buffer.log_event(-1, -1, 101, 0, 0)\n self._started = False\n<|end_body_1|>\n", "class_docstring": "One class to measure time wasted by profiling.", "class_name": "EventProfilerDebug", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EventProfilerDebug:\n \"\"\"One class to measure time wasted by profiling.\"\"\"\n\n def start(self):\n \"\"\"Starts the profiling without enabling it.\"\"\"\n <|body_0|>\n\n def stop(self):\n \"\"\"Stops the unstarted profiling.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self._started:\n raise RuntimeError('The profiler was already started. It cannot be done again.')\n self._frames[0] = inspect.currentframe()\n self._started = True\n self._copy_cache = False\n self._buffer.log_event(-1, -1, 100, 0, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._started:\n raise RuntimeError('The profiler was not started. It must be done first.')\n self._buffer.log_event(-1, -1, 101, 0, 0)\n self._started = False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000024", "length_bytes": 13879, "license_type": "permissive", "methods": [{"docstring": "Starts the profiling without enabling it.", "name": "start", "signature": "def start(self)"}, {"docstring": "Stops the unstarted profiling.", "name": "stop", "signature": "def stop(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_019313", "prompt": "Implement the Python class `EventProfilerDebug` described below.\n\nClass description:\nOne class to measure time wasted by profiling.\n\nMethod signatures and docstrings:\n- def start(self): Starts the profiling without enabling it.\n- def stop(self): Stops the unstarted profiling.", "prompted_full_text": "Implement the Python class `EventProfilerDebug` described below.\n\nClass description:\nOne class to measure time wasted by profiling.\n\nMethod signatures and docstrings:\n- def start(self): Starts the profiling without enabling it.\n- def stop(self): Stops the unstarted profiling.\n\n<|skeleton|>\nclass EventProfilerDebug:\n \"\"\"One class to measure time wasted by profiling.\"\"\"\n\n def start(self):\n \"\"\"Starts the profiling without enabling it.\"\"\"\n <|body_0|>\n\n def stop(self):\n \"\"\"Stops the unstarted profiling.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self._started:\n raise RuntimeError('The profiler was already started. It cannot be done again.')\n self._frames[0] = inspect.currentframe()\n self._started = True\n self._copy_cache = False\n self._buffer.log_event(-1, -1, 100, 0, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n if not self._started:\n raise RuntimeError('The profiler was not started. It must be done first.')\n self._buffer.log_event(-1, -1, 101, 0, 0)\n self._started = False\n<|end_body_1|>\n", "revision_id": "9d52a497e72d2807e87a730b38db1843f3a098ed", "skeleton": "<|skeleton|>\nclass EventProfilerDebug:\n \"\"\"One class to measure time wasted by profiling.\"\"\"\n\n def start(self):\n \"\"\"Starts the profiling without enabling it.\"\"\"\n <|body_0|>\n\n def stop(self):\n \"\"\"Stops the unstarted profiling.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EventProfilerDebug:\n \"\"\"One class to measure time wasted by profiling.\"\"\"\n\n def start(self):\n \"\"\"Starts the profiling without enabling it.\"\"\"\n if self._started:\n raise RuntimeError('The profiler was already started. It cannot be done again.')\n self._frames[0] = inspect.currentframe()\n self._started = True\n self._copy_cache = False\n self._buffer.log_event(-1, -1, 100, 0, 0)\n\n def stop(self):\n \"\"\"Stops the unstarted profiling.\"\"\"\n if not self._started:\n raise RuntimeError('The profiler was not started. It must be done first.')\n self._buffer.log_event(-1, -1, 101, 0, 0)\n self._started = False\n", "source": "the_stack_v2_python_sparse", "source_path": "cpyquickhelper/profiling/event_profiler.py", "source_repo": "sdpython/cpyquickhelper", "split": "test", "star_events_count": 2} {"blob_id": "9693defee08a77e9ee2e94b800a3b62433a78c2e", "bodies": ["super(TwolayerNet, self).__init__()\nself.linear1 = torch.nn.Linear(D_in, H)\nself.relu = torch.nn.ReLU()\nself.linear2 = torch.nn.Linear(H, D_out)", "h_relu = self.linear1(x).clamp(min=0)\ny_pred = self.linear2(h_relu)\nreturn y_pred"], "bodies_text": "<|body_start_0|>\n super(TwolayerNet, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.relu = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(H, D_out)\n<|end_body_0|>\n\n<|body_start_1|>\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TwolayerNet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TwolayerNet:\n\n def __init__(self, D_in, H, D_out):\n \"\"\"we assign the follwoing paramenters: :param D_in: :param H: :param D_out:\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"in the forward function we accept a Tensor of input datand we must return a tensor of output data. We can use Modules defined in the constructor :param x: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TwolayerNet, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.relu = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(H, D_out)\n<|end_body_0|>\n\n<|body_start_1|>\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000025", "length_bytes": 1879, "license_type": "no_license", "methods": [{"docstring": "we assign the follwoing paramenters: :param D_in: :param H: :param D_out:", "name": "__init__", "signature": "def __init__(self, D_in, H, D_out)"}, {"docstring": "in the forward function we accept a Tensor of input datand we must return a tensor of output data. We can use Modules defined in the constructor :param x: :return:", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_026100", "prompt": "Implement the Python class `TwolayerNet` described below.\n\nClass description:\nImplement the TwolayerNet class.\n\nMethod signatures and docstrings:\n- def __init__(self, D_in, H, D_out): we assign the follwoing paramenters: :param D_in: :param H: :param D_out:\n- def forward(self, x): in the forward function we accept a Tensor of input datand we must return a tensor of output data. We can use Modules defined in the constructor :param x: :return:", "prompted_full_text": "Implement the Python class `TwolayerNet` described below.\n\nClass description:\nImplement the TwolayerNet class.\n\nMethod signatures and docstrings:\n- def __init__(self, D_in, H, D_out): we assign the follwoing paramenters: :param D_in: :param H: :param D_out:\n- def forward(self, x): in the forward function we accept a Tensor of input datand we must return a tensor of output data. We can use Modules defined in the constructor :param x: :return:\n\n<|skeleton|>\nclass TwolayerNet:\n\n def __init__(self, D_in, H, D_out):\n \"\"\"we assign the follwoing paramenters: :param D_in: :param H: :param D_out:\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"in the forward function we accept a Tensor of input datand we must return a tensor of output data. We can use Modules defined in the constructor :param x: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TwolayerNet, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.relu = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(H, D_out)\n<|end_body_0|>\n\n<|body_start_1|>\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred\n<|end_body_1|>\n", "revision_id": "1e19b21ecfc05218f6201da88854231300378905", "skeleton": "<|skeleton|>\nclass TwolayerNet:\n\n def __init__(self, D_in, H, D_out):\n \"\"\"we assign the follwoing paramenters: :param D_in: :param H: :param D_out:\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"in the forward function we accept a Tensor of input datand we must return a tensor of output data. We can use Modules defined in the constructor :param x: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TwolayerNet:\n def __init__(self, D_in, H, D_out):\n \"\"\"we assign the follwoing paramenters: :param D_in: :param H: :param D_out:\"\"\"\n super(TwolayerNet, self).__init__()\n self.linear1 = torch.nn.Linear(D_in, H)\n self.relu = torch.nn.ReLU()\n self.linear2 = torch.nn.Linear(H, D_out)\n\n def forward(self, x):\n \"\"\"in the forward function we accept a Tensor of input datand we must return a tensor of output data. We can use Modules defined in the constructor :param x: :return:\"\"\"\n h_relu = self.linear1(x).clamp(min=0)\n y_pred = self.linear2(h_relu)\n return y_pred\n", "source": "the_stack_v2_python_sparse", "source_path": "neural_network/pytorch_custom_learning.py", "source_repo": "Bannerli/Machine_Learning_Algorithms", "split": "test", "star_events_count": 0} {"blob_id": "cfcc50c14893e9388ca33733ef40f33689144a99", "bodies": ["debug('BlockObject.__str__ %s, %s, %s', self.name, self.args, self.opts)\ncode = ''\ncode += self._get_begin_code()\nfor opt in self.opts:\n if isinstance(opt, SceneItem):\n code += str(opt)\n else:\n code += self._get_line(str(opt))\ncode += self._get_end_code()\ndebug('BlockObject.__str__ Code: \\n%s', code)\nreturn code", "code = ' ' * self.indentation.get() + self.name + self._block_begin()\nif self.args:\n code = code + self._get_line(', '.join([str(arg) for arg in self.args]))\nreturn code", "code = ''\nkwargs = self.kwargs.items()\nkwargs.reverse()\nfor key, val in kwargs:\n if val is True:\n code += self._get_line('%s' % key)\n else:\n code += self._get_line('%s %s' % (key, str(val).strip()))\ncode += self._block_end()\nreturn code"], "bodies_text": "<|body_start_0|>\n debug('BlockObject.__str__ %s, %s, %s', self.name, self.args, self.opts)\n code = ''\n code += self._get_begin_code()\n for opt in self.opts:\n if isinstance(opt, SceneItem):\n code += str(opt)\n else:\n code += self._get_line(str(opt))\n code += self._get_end_code()\n debug('BlockObject.__str__ Code: \\n%s', code)\n return code\n<|end_body_0|>\n\n<|body_start_1|>\n code = ' ' * self.indentation.get() + self.name + self._block_begin()\n if self.args:\n code = code + self._get_line(', '.join([str(arg) for arg in self.args]))\n return code\n<|end_body_1|>\n\n<|body_start_2|>\n code = ''\n kwargs = self.kwargs.items()\n kwargs.reverse()\n for key, val in kwargs:\n if val is True:\n code += self._get_line('%s' % key)\n else:\n code += self._get_line('%s %s' % (key, str(val).strip()))\n code += self._block_end()\n return code\n<|end_body_2|>\n", "class_docstring": "provides methods to generate code blocks.", "class_name": "BlockObject", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BlockObject:\n \"\"\"provides methods to generate code blocks.\"\"\"\n\n def __str__(self):\n \"\"\"return PoV code as string representation.\"\"\"\n <|body_0|>\n\n def _get_begin_code(self):\n \"\"\"Start block of code.\"\"\"\n <|body_1|>\n\n def _get_end_code(self):\n \"\"\"End block of code.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n debug('BlockObject.__str__ %s, %s, %s', self.name, self.args, self.opts)\n code = ''\n code += self._get_begin_code()\n for opt in self.opts:\n if isinstance(opt, SceneItem):\n code += str(opt)\n else:\n code += self._get_line(str(opt))\n code += self._get_end_code()\n debug('BlockObject.__str__ Code: \\n%s', code)\n return code\n<|end_body_0|>\n\n<|body_start_1|>\n code = ' ' * self.indentation.get() + self.name + self._block_begin()\n if self.args:\n code = code + self._get_line(', '.join([str(arg) for arg in self.args]))\n return code\n<|end_body_1|>\n\n<|body_start_2|>\n code = ''\n kwargs = self.kwargs.items()\n kwargs.reverse()\n for key, val in kwargs:\n if val is True:\n code += self._get_line('%s' % key)\n else:\n code += self._get_line('%s %s' % (key, str(val).strip()))\n code += self._block_end()\n return code\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000026", "length_bytes": 1718, "license_type": "no_license", "methods": [{"docstring": "return PoV code as string representation.", "name": "__str__", "signature": "def __str__(self)"}, {"docstring": "Start block of code.", "name": "_get_begin_code", "signature": "def _get_begin_code(self)"}, {"docstring": "End block of code.", "name": "_get_end_code", "signature": "def _get_end_code(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_029444", "prompt": "Implement the Python class `BlockObject` described below.\n\nClass description:\nprovides methods to generate code blocks.\n\nMethod signatures and docstrings:\n- def __str__(self): return PoV code as string representation.\n- def _get_begin_code(self): Start block of code.\n- def _get_end_code(self): End block of code.", "prompted_full_text": "Implement the Python class `BlockObject` described below.\n\nClass description:\nprovides methods to generate code blocks.\n\nMethod signatures and docstrings:\n- def __str__(self): return PoV code as string representation.\n- def _get_begin_code(self): Start block of code.\n- def _get_end_code(self): End block of code.\n\n<|skeleton|>\nclass BlockObject:\n \"\"\"provides methods to generate code blocks.\"\"\"\n\n def __str__(self):\n \"\"\"return PoV code as string representation.\"\"\"\n <|body_0|>\n\n def _get_begin_code(self):\n \"\"\"Start block of code.\"\"\"\n <|body_1|>\n\n def _get_end_code(self):\n \"\"\"End block of code.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n debug('BlockObject.__str__ %s, %s, %s', self.name, self.args, self.opts)\n code = ''\n code += self._get_begin_code()\n for opt in self.opts:\n if isinstance(opt, SceneItem):\n code += str(opt)\n else:\n code += self._get_line(str(opt))\n code += self._get_end_code()\n debug('BlockObject.__str__ Code: \\n%s', code)\n return code\n<|end_body_0|>\n\n<|body_start_1|>\n code = ' ' * self.indentation.get() + self.name + self._block_begin()\n if self.args:\n code = code + self._get_line(', '.join([str(arg) for arg in self.args]))\n return code\n<|end_body_1|>\n\n<|body_start_2|>\n code = ''\n kwargs = self.kwargs.items()\n kwargs.reverse()\n for key, val in kwargs:\n if val is True:\n code += self._get_line('%s' % key)\n else:\n code += self._get_line('%s %s' % (key, str(val).strip()))\n code += self._block_end()\n return code\n<|end_body_2|>\n", "revision_id": "27c5d3f78c545ad01ecd6388cebb8326d164cbd0", "skeleton": "<|skeleton|>\nclass BlockObject:\n \"\"\"provides methods to generate code blocks.\"\"\"\n\n def __str__(self):\n \"\"\"return PoV code as string representation.\"\"\"\n <|body_0|>\n\n def _get_begin_code(self):\n \"\"\"Start block of code.\"\"\"\n <|body_1|>\n\n def _get_end_code(self):\n \"\"\"End block of code.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BlockObject:\n \"\"\"provides methods to generate code blocks.\"\"\"\n\n def __str__(self):\n \"\"\"return PoV code as string representation.\"\"\"\n debug('BlockObject.__str__ %s, %s, %s', self.name, self.args, self.opts)\n code = ''\n code += self._get_begin_code()\n for opt in self.opts:\n if isinstance(opt, SceneItem):\n code += str(opt)\n else:\n code += self._get_line(str(opt))\n code += self._get_end_code()\n debug('BlockObject.__str__ Code: \\n%s', code)\n return code\n\n def _get_begin_code(self):\n \"\"\"Start block of code.\"\"\"\n code = ' ' * self.indentation.get() + self.name + self._block_begin()\n if self.args:\n code = code + self._get_line(', '.join([str(arg) for arg in self.args]))\n return code\n\n def _get_end_code(self):\n \"\"\"End block of code.\"\"\"\n code = ''\n kwargs = self.kwargs.items()\n kwargs.reverse()\n for key, val in kwargs:\n if val is True:\n code += self._get_line('%s' % key)\n else:\n code += self._get_line('%s %s' % (key, str(val).strip()))\n code += self._block_end()\n return code\n", "source": "the_stack_v2_python_sparse", "source_path": "pov/basic/BlockObject.py", "source_repo": "pennyarcade/py_pov", "split": "test", "star_events_count": 0} {"blob_id": "8a1caa2578549f47287771d4c4c0585eb1df4541", "bodies": ["self.np_random = np_random\nself.discard_pile = []\nself.shuffled_deck = utils.get_deck()\nself.np_random.shuffle(self.shuffled_deck)\nself.stock_pile = self.shuffled_deck.copy()", "for _ in range(num):\n player.hand.append(self.stock_pile.pop())\nplayer.did_populate_hand()"], "bodies_text": "<|body_start_0|>\n self.np_random = np_random\n self.discard_pile = []\n self.shuffled_deck = utils.get_deck()\n self.np_random.shuffle(self.shuffled_deck)\n self.stock_pile = self.shuffled_deck.copy()\n<|end_body_0|>\n\n<|body_start_1|>\n for _ in range(num):\n player.hand.append(self.stock_pile.pop())\n player.did_populate_hand()\n<|end_body_1|>\n", "class_docstring": "Initialize a GinRummy dealer class", "class_name": "GinRummyDealer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GinRummyDealer:\n \"\"\"Initialize a GinRummy dealer class\"\"\"\n\n def __init__(self, np_random):\n \"\"\"Empty discard_pile, set shuffled_deck, set stock_pile\"\"\"\n <|body_0|>\n\n def deal_cards(self, player: GinRummyPlayer, num: int):\n \"\"\"Deal some cards from stock_pile to one player Args: player (GinRummyPlayer): The GinRummyPlayer object num (int): The number of cards to be dealt\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.np_random = np_random\n self.discard_pile = []\n self.shuffled_deck = utils.get_deck()\n self.np_random.shuffle(self.shuffled_deck)\n self.stock_pile = self.shuffled_deck.copy()\n<|end_body_0|>\n\n<|body_start_1|>\n for _ in range(num):\n player.hand.append(self.stock_pile.pop())\n player.did_populate_hand()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000027", "length_bytes": 1049, "license_type": "permissive", "methods": [{"docstring": "Empty discard_pile, set shuffled_deck, set stock_pile", "name": "__init__", "signature": "def __init__(self, np_random)"}, {"docstring": "Deal some cards from stock_pile to one player Args: player (GinRummyPlayer): The GinRummyPlayer object num (int): The number of cards to be dealt", "name": "deal_cards", "signature": "def deal_cards(self, player: GinRummyPlayer, num: int)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040687", "prompt": "Implement the Python class `GinRummyDealer` described below.\n\nClass description:\nInitialize a GinRummy dealer class\n\nMethod signatures and docstrings:\n- def __init__(self, np_random): Empty discard_pile, set shuffled_deck, set stock_pile\n- def deal_cards(self, player: GinRummyPlayer, num: int): Deal some cards from stock_pile to one player Args: player (GinRummyPlayer): The GinRummyPlayer object num (int): The number of cards to be dealt", "prompted_full_text": "Implement the Python class `GinRummyDealer` described below.\n\nClass description:\nInitialize a GinRummy dealer class\n\nMethod signatures and docstrings:\n- def __init__(self, np_random): Empty discard_pile, set shuffled_deck, set stock_pile\n- def deal_cards(self, player: GinRummyPlayer, num: int): Deal some cards from stock_pile to one player Args: player (GinRummyPlayer): The GinRummyPlayer object num (int): The number of cards to be dealt\n\n<|skeleton|>\nclass GinRummyDealer:\n \"\"\"Initialize a GinRummy dealer class\"\"\"\n\n def __init__(self, np_random):\n \"\"\"Empty discard_pile, set shuffled_deck, set stock_pile\"\"\"\n <|body_0|>\n\n def deal_cards(self, player: GinRummyPlayer, num: int):\n \"\"\"Deal some cards from stock_pile to one player Args: player (GinRummyPlayer): The GinRummyPlayer object num (int): The number of cards to be dealt\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.np_random = np_random\n self.discard_pile = []\n self.shuffled_deck = utils.get_deck()\n self.np_random.shuffle(self.shuffled_deck)\n self.stock_pile = self.shuffled_deck.copy()\n<|end_body_0|>\n\n<|body_start_1|>\n for _ in range(num):\n player.hand.append(self.stock_pile.pop())\n player.did_populate_hand()\n<|end_body_1|>\n", "revision_id": "7fc56edebe9a2e39c94f872edd8dbe325c61b806", "skeleton": "<|skeleton|>\nclass GinRummyDealer:\n \"\"\"Initialize a GinRummy dealer class\"\"\"\n\n def __init__(self, np_random):\n \"\"\"Empty discard_pile, set shuffled_deck, set stock_pile\"\"\"\n <|body_0|>\n\n def deal_cards(self, player: GinRummyPlayer, num: int):\n \"\"\"Deal some cards from stock_pile to one player Args: player (GinRummyPlayer): The GinRummyPlayer object num (int): The number of cards to be dealt\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GinRummyDealer:\n \"\"\"Initialize a GinRummy dealer class\"\"\"\n\n def __init__(self, np_random):\n \"\"\"Empty discard_pile, set shuffled_deck, set stock_pile\"\"\"\n self.np_random = np_random\n self.discard_pile = []\n self.shuffled_deck = utils.get_deck()\n self.np_random.shuffle(self.shuffled_deck)\n self.stock_pile = self.shuffled_deck.copy()\n\n def deal_cards(self, player: GinRummyPlayer, num: int):\n \"\"\"Deal some cards from stock_pile to one player Args: player (GinRummyPlayer): The GinRummyPlayer object num (int): The number of cards to be dealt\"\"\"\n for _ in range(num):\n player.hand.append(self.stock_pile.pop())\n player.did_populate_hand()\n", "source": "the_stack_v2_python_sparse", "source_path": "rlcard/games/gin_rummy/dealer.py", "source_repo": "datamllab/rlcard", "split": "test", "star_events_count": 2447} {"blob_id": "7f4a5857020dcfd981b4c7051abc58e4bf672be0", "bodies": ["requesition_obj = self.pool.get('purchase.requisition')\nrequesition = requesition_obj.browse(cr, uid, context['request_id'])\nsubject = 'RFQ For ' + requesition.name\nir_mail = self.pool.get('ir.mail_server')\npartner_obj = self.pool.get('res.partner')\npartner_ids = requesition_obj.get_partner_ids(cr, uid, ids, requesition, context=context)\nif context['server_mail_id']:\n try:\n mail_rec = ir_mail.browse(cr, uid, context['server_mail_id'])\n user_name = str(mail_rec.smtp_user)\n password = mail_rec.smtp_pass\n smtp_host = str(mail_rec.smtp_host)\n smtp_port = mail_rec.smtp_port\n smtpObj = smtplib.SMTP(host=str(smtp_host), port=25)\n smtpObj.set_debuglevel(1)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(user=user_name, password=password)\n for partner in partner_obj.browse(cr, uid, partner_ids):\n if partner.email:\n Text = 'Dear ' + partner.name + ' ,,, \\n\\n This Message From ' + requesition.company_id.name + '\\n' + context['message']\n message = 'Subject: %s\\n\\n%s' % (subject, Text)\n smtpObj.sendmail(user_name, partner.email, message)\n except ValueError:\n print('Oops! That Problem in Username Or Password .Recheck the Outgoing Mail Configration ...')\nreturn True", "if context is None:\n context = {}\nreq_obj = self.pool.get('purchase.requisition')\nusers = self.pool.get('res.users')\nres = {}\nreq_id = context.get('active_ids', [])\nreq_rec = req_obj.browse(cr, uid, req_id, context=context)[0]\nusers.browse(cr, uid, uid).id\nres.update({'company_id': req_rec.company_id.id, 'request_id': req_rec.id, 'user_id': users.browse(cr, uid, uid).id})\nreturn res"], "bodies_text": "<|body_start_0|>\n requesition_obj = self.pool.get('purchase.requisition')\n requesition = requesition_obj.browse(cr, uid, context['request_id'])\n subject = 'RFQ For ' + requesition.name\n ir_mail = self.pool.get('ir.mail_server')\n partner_obj = self.pool.get('res.partner')\n partner_ids = requesition_obj.get_partner_ids(cr, uid, ids, requesition, context=context)\n if context['server_mail_id']:\n try:\n mail_rec = ir_mail.browse(cr, uid, context['server_mail_id'])\n user_name = str(mail_rec.smtp_user)\n password = mail_rec.smtp_pass\n smtp_host = str(mail_rec.smtp_host)\n smtp_port = mail_rec.smtp_port\n smtpObj = smtplib.SMTP(host=str(smtp_host), port=25)\n smtpObj.set_debuglevel(1)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(user=user_name, password=password)\n for partner in partner_obj.browse(cr, uid, partner_ids):\n if partner.email:\n Text = 'Dear ' + partner.name + ' ,,, \\n\\n This Message From ' + requesition.company_id.name + '\\n' + context['message']\n message = 'Subject: %s\\n\\n%s' % (subject, Text)\n smtpObj.sendmail(user_name, partner.email, message)\n except ValueError:\n print('Oops! That Problem in Username Or Password .Recheck the Outgoing Mail Configration ...')\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n req_obj = self.pool.get('purchase.requisition')\n users = self.pool.get('res.users')\n res = {}\n req_id = context.get('active_ids', [])\n req_rec = req_obj.browse(cr, uid, req_id, context=context)[0]\n users.browse(cr, uid, uid).id\n res.update({'company_id': req_rec.company_id.id, 'request_id': req_rec.id, 'user_id': users.browse(cr, uid, uid).id})\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "purchase_send_email_quotation_wizard", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass purchase_send_email_quotation_wizard:\n\n def send_message(self, cr, uid, ids, context=None):\n \"\"\"This Method for send Email For Suppliers\"\"\"\n <|body_0|>\n\n def default_get(self, cr, uid, fields, context=None):\n \"\"\"To get default values for the object. @return: A dictionary which of fields with values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n requesition_obj = self.pool.get('purchase.requisition')\n requesition = requesition_obj.browse(cr, uid, context['request_id'])\n subject = 'RFQ For ' + requesition.name\n ir_mail = self.pool.get('ir.mail_server')\n partner_obj = self.pool.get('res.partner')\n partner_ids = requesition_obj.get_partner_ids(cr, uid, ids, requesition, context=context)\n if context['server_mail_id']:\n try:\n mail_rec = ir_mail.browse(cr, uid, context['server_mail_id'])\n user_name = str(mail_rec.smtp_user)\n password = mail_rec.smtp_pass\n smtp_host = str(mail_rec.smtp_host)\n smtp_port = mail_rec.smtp_port\n smtpObj = smtplib.SMTP(host=str(smtp_host), port=25)\n smtpObj.set_debuglevel(1)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(user=user_name, password=password)\n for partner in partner_obj.browse(cr, uid, partner_ids):\n if partner.email:\n Text = 'Dear ' + partner.name + ' ,,, \\n\\n This Message From ' + requesition.company_id.name + '\\n' + context['message']\n message = 'Subject: %s\\n\\n%s' % (subject, Text)\n smtpObj.sendmail(user_name, partner.email, message)\n except ValueError:\n print('Oops! That Problem in Username Or Password .Recheck the Outgoing Mail Configration ...')\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n req_obj = self.pool.get('purchase.requisition')\n users = self.pool.get('res.users')\n res = {}\n req_id = context.get('active_ids', [])\n req_rec = req_obj.browse(cr, uid, req_id, context=context)[0]\n users.browse(cr, uid, uid).id\n res.update({'company_id': req_rec.company_id.id, 'request_id': req_rec.id, 'user_id': users.browse(cr, uid, uid).id})\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000028", "length_bytes": 3800, "license_type": "no_license", "methods": [{"docstring": "This Method for send Email For Suppliers", "name": "send_message", "signature": "def send_message(self, cr, uid, ids, context=None)"}, {"docstring": "To get default values for the object. @return: A dictionary which of fields with values.", "name": "default_get", "signature": "def default_get(self, cr, uid, fields, context=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030784", "prompt": "Implement the Python class `purchase_send_email_quotation_wizard` described below.\n\nClass description:\nImplement the purchase_send_email_quotation_wizard class.\n\nMethod signatures and docstrings:\n- def send_message(self, cr, uid, ids, context=None): This Method for send Email For Suppliers\n- def default_get(self, cr, uid, fields, context=None): To get default values for the object. @return: A dictionary which of fields with values.", "prompted_full_text": "Implement the Python class `purchase_send_email_quotation_wizard` described below.\n\nClass description:\nImplement the purchase_send_email_quotation_wizard class.\n\nMethod signatures and docstrings:\n- def send_message(self, cr, uid, ids, context=None): This Method for send Email For Suppliers\n- def default_get(self, cr, uid, fields, context=None): To get default values for the object. @return: A dictionary which of fields with values.\n\n<|skeleton|>\nclass purchase_send_email_quotation_wizard:\n\n def send_message(self, cr, uid, ids, context=None):\n \"\"\"This Method for send Email For Suppliers\"\"\"\n <|body_0|>\n\n def default_get(self, cr, uid, fields, context=None):\n \"\"\"To get default values for the object. @return: A dictionary which of fields with values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n requesition_obj = self.pool.get('purchase.requisition')\n requesition = requesition_obj.browse(cr, uid, context['request_id'])\n subject = 'RFQ For ' + requesition.name\n ir_mail = self.pool.get('ir.mail_server')\n partner_obj = self.pool.get('res.partner')\n partner_ids = requesition_obj.get_partner_ids(cr, uid, ids, requesition, context=context)\n if context['server_mail_id']:\n try:\n mail_rec = ir_mail.browse(cr, uid, context['server_mail_id'])\n user_name = str(mail_rec.smtp_user)\n password = mail_rec.smtp_pass\n smtp_host = str(mail_rec.smtp_host)\n smtp_port = mail_rec.smtp_port\n smtpObj = smtplib.SMTP(host=str(smtp_host), port=25)\n smtpObj.set_debuglevel(1)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(user=user_name, password=password)\n for partner in partner_obj.browse(cr, uid, partner_ids):\n if partner.email:\n Text = 'Dear ' + partner.name + ' ,,, \\n\\n This Message From ' + requesition.company_id.name + '\\n' + context['message']\n message = 'Subject: %s\\n\\n%s' % (subject, Text)\n smtpObj.sendmail(user_name, partner.email, message)\n except ValueError:\n print('Oops! That Problem in Username Or Password .Recheck the Outgoing Mail Configration ...')\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n if context is None:\n context = {}\n req_obj = self.pool.get('purchase.requisition')\n users = self.pool.get('res.users')\n res = {}\n req_id = context.get('active_ids', [])\n req_rec = req_obj.browse(cr, uid, req_id, context=context)[0]\n users.browse(cr, uid, uid).id\n res.update({'company_id': req_rec.company_id.id, 'request_id': req_rec.id, 'user_id': users.browse(cr, uid, uid).id})\n return res\n<|end_body_1|>\n", "revision_id": "0b997095c260d58b026440967fea3a202bef7efb", "skeleton": "<|skeleton|>\nclass purchase_send_email_quotation_wizard:\n\n def send_message(self, cr, uid, ids, context=None):\n \"\"\"This Method for send Email For Suppliers\"\"\"\n <|body_0|>\n\n def default_get(self, cr, uid, fields, context=None):\n \"\"\"To get default values for the object. @return: A dictionary which of fields with values.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class purchase_send_email_quotation_wizard:\n def send_message(self, cr, uid, ids, context=None):\n \"\"\"This Method for send Email For Suppliers\"\"\"\n requesition_obj = self.pool.get('purchase.requisition')\n requesition = requesition_obj.browse(cr, uid, context['request_id'])\n subject = 'RFQ For ' + requesition.name\n ir_mail = self.pool.get('ir.mail_server')\n partner_obj = self.pool.get('res.partner')\n partner_ids = requesition_obj.get_partner_ids(cr, uid, ids, requesition, context=context)\n if context['server_mail_id']:\n try:\n mail_rec = ir_mail.browse(cr, uid, context['server_mail_id'])\n user_name = str(mail_rec.smtp_user)\n password = mail_rec.smtp_pass\n smtp_host = str(mail_rec.smtp_host)\n smtp_port = mail_rec.smtp_port\n smtpObj = smtplib.SMTP(host=str(smtp_host), port=25)\n smtpObj.set_debuglevel(1)\n smtpObj.ehlo()\n smtpObj.starttls()\n smtpObj.login(user=user_name, password=password)\n for partner in partner_obj.browse(cr, uid, partner_ids):\n if partner.email:\n Text = 'Dear ' + partner.name + ' ,,, \\n\\n This Message From ' + requesition.company_id.name + '\\n' + context['message']\n message = 'Subject: %s\\n\\n%s' % (subject, Text)\n smtpObj.sendmail(user_name, partner.email, message)\n except ValueError:\n print('Oops! That Problem in Username Or Password .Recheck the Outgoing Mail Configration ...')\n return True\n\n def default_get(self, cr, uid, fields, context=None):\n \"\"\"To get default values for the object. @return: A dictionary which of fields with values.\"\"\"\n if context is None:\n context = {}\n req_obj = self.pool.get('purchase.requisition')\n users = self.pool.get('res.users')\n res = {}\n req_id = context.get('active_ids', [])\n req_rec = req_obj.browse(cr, uid, req_id, context=context)[0]\n users.browse(cr, uid, uid).id\n res.update({'company_id': req_rec.company_id.id, 'request_id': req_rec.id, 'user_id': users.browse(cr, uid, uid).id})\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "v_7/Dongola/ntc/purchase_send_email_quotation/wizard/purchase_send_email_quotation_wizard.py", "source_repo": "musabahmed/baba", "split": "test", "star_events_count": 0} {"blob_id": "63fbd1b34b2ff4ce29eb2cd8ac26d2f9b9de7c35", "bodies": ["try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n request_time = forgot_password_request.request_time\n expiry_time_min = forgot_password_request.expiry_time\n current_time = datetime.datetime.utcnow().replace(tzinfo=utc)\n expiry_time = request_time + datetime.timedelta(minutes=expiry_time_min)\n if expiry_time >= current_time and (not forgot_password_request.is_expired):\n return Utils.dispatch_success(OK, ['TOKEN_VALID'])\n else:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\nexcept ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\nexcept Exception as e:\n print('Forgot Password Token Validation', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)", "try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n user = forgot_password_request.user\n password = request.data.get('password')\n reenter_password = request.data.get('reenter_password')\n if password is None or reenter_password is None:\n return Utils.dispatch_failure(MISSING_PARAMETERS)\n elif password == reenter_password and (password is not '' or reenter_password is not ''):\n if forgot_password_request.is_expired:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n u = User.objects.get(username=str(user))\n u.set_password(password)\n u.save()\n forgot_password_request.is_expired = True\n forgot_password_request.save()\n else:\n return Utils.dispatch_failure(VALIDATION_MISSING)\nexcept MultipleObjectsReturned:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\nexcept ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\nexcept Exception as e:\n print('Forgot Password Password Change', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\nreturn Utils.dispatch_success(OK, ['PASSWORD_RESET_SUCCESSFUL'])"], "bodies_text": "<|body_start_0|>\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n request_time = forgot_password_request.request_time\n expiry_time_min = forgot_password_request.expiry_time\n current_time = datetime.datetime.utcnow().replace(tzinfo=utc)\n expiry_time = request_time + datetime.timedelta(minutes=expiry_time_min)\n if expiry_time >= current_time and (not forgot_password_request.is_expired):\n return Utils.dispatch_success(OK, ['TOKEN_VALID'])\n else:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Token Validation', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n user = forgot_password_request.user\n password = request.data.get('password')\n reenter_password = request.data.get('reenter_password')\n if password is None or reenter_password is None:\n return Utils.dispatch_failure(MISSING_PARAMETERS)\n elif password == reenter_password and (password is not '' or reenter_password is not ''):\n if forgot_password_request.is_expired:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n u = User.objects.get(username=str(user))\n u.set_password(password)\n u.save()\n forgot_password_request.is_expired = True\n forgot_password_request.save()\n else:\n return Utils.dispatch_failure(VALIDATION_MISSING)\n except MultipleObjectsReturned:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Password Change', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n return Utils.dispatch_success(OK, ['PASSWORD_RESET_SUCCESSFUL'])\n<|end_body_1|>\n", "class_docstring": "Forgot Password Handler", "class_name": "ForgotPassword", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ForgotPassword:\n \"\"\"Forgot Password Handler\"\"\"\n\n def get(self, request, token):\n \"\"\"Forgot Password Token Validator\"\"\"\n <|body_0|>\n\n def post(self, request, token):\n \"\"\"Forgot Password Password Change\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n request_time = forgot_password_request.request_time\n expiry_time_min = forgot_password_request.expiry_time\n current_time = datetime.datetime.utcnow().replace(tzinfo=utc)\n expiry_time = request_time + datetime.timedelta(minutes=expiry_time_min)\n if expiry_time >= current_time and (not forgot_password_request.is_expired):\n return Utils.dispatch_success(OK, ['TOKEN_VALID'])\n else:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Token Validation', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n user = forgot_password_request.user\n password = request.data.get('password')\n reenter_password = request.data.get('reenter_password')\n if password is None or reenter_password is None:\n return Utils.dispatch_failure(MISSING_PARAMETERS)\n elif password == reenter_password and (password is not '' or reenter_password is not ''):\n if forgot_password_request.is_expired:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n u = User.objects.get(username=str(user))\n u.set_password(password)\n u.save()\n forgot_password_request.is_expired = True\n forgot_password_request.save()\n else:\n return Utils.dispatch_failure(VALIDATION_MISSING)\n except MultipleObjectsReturned:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Password Change', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n return Utils.dispatch_success(OK, ['PASSWORD_RESET_SUCCESSFUL'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000029", "length_bytes": 45124, "license_type": "no_license", "methods": [{"docstring": "Forgot Password Token Validator", "name": "get", "signature": "def get(self, request, token)"}, {"docstring": "Forgot Password Password Change", "name": "post", "signature": "def post(self, request, token)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012240", "prompt": "Implement the Python class `ForgotPassword` described below.\n\nClass description:\nForgot Password Handler\n\nMethod signatures and docstrings:\n- def get(self, request, token): Forgot Password Token Validator\n- def post(self, request, token): Forgot Password Password Change", "prompted_full_text": "Implement the Python class `ForgotPassword` described below.\n\nClass description:\nForgot Password Handler\n\nMethod signatures and docstrings:\n- def get(self, request, token): Forgot Password Token Validator\n- def post(self, request, token): Forgot Password Password Change\n\n<|skeleton|>\nclass ForgotPassword:\n \"\"\"Forgot Password Handler\"\"\"\n\n def get(self, request, token):\n \"\"\"Forgot Password Token Validator\"\"\"\n <|body_0|>\n\n def post(self, request, token):\n \"\"\"Forgot Password Password Change\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n request_time = forgot_password_request.request_time\n expiry_time_min = forgot_password_request.expiry_time\n current_time = datetime.datetime.utcnow().replace(tzinfo=utc)\n expiry_time = request_time + datetime.timedelta(minutes=expiry_time_min)\n if expiry_time >= current_time and (not forgot_password_request.is_expired):\n return Utils.dispatch_success(OK, ['TOKEN_VALID'])\n else:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Token Validation', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n user = forgot_password_request.user\n password = request.data.get('password')\n reenter_password = request.data.get('reenter_password')\n if password is None or reenter_password is None:\n return Utils.dispatch_failure(MISSING_PARAMETERS)\n elif password == reenter_password and (password is not '' or reenter_password is not ''):\n if forgot_password_request.is_expired:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n u = User.objects.get(username=str(user))\n u.set_password(password)\n u.save()\n forgot_password_request.is_expired = True\n forgot_password_request.save()\n else:\n return Utils.dispatch_failure(VALIDATION_MISSING)\n except MultipleObjectsReturned:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Password Change', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n return Utils.dispatch_success(OK, ['PASSWORD_RESET_SUCCESSFUL'])\n<|end_body_1|>\n", "revision_id": "dbcf886a7cf2d2fb12400a0f1b3e85e8da5cd56b", "skeleton": "<|skeleton|>\nclass ForgotPassword:\n \"\"\"Forgot Password Handler\"\"\"\n\n def get(self, request, token):\n \"\"\"Forgot Password Token Validator\"\"\"\n <|body_0|>\n\n def post(self, request, token):\n \"\"\"Forgot Password Password Change\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ForgotPassword:\n \"\"\"Forgot Password Handler\"\"\"\n\n def get(self, request, token):\n \"\"\"Forgot Password Token Validator\"\"\"\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n request_time = forgot_password_request.request_time\n expiry_time_min = forgot_password_request.expiry_time\n current_time = datetime.datetime.utcnow().replace(tzinfo=utc)\n expiry_time = request_time + datetime.timedelta(minutes=expiry_time_min)\n if expiry_time >= current_time and (not forgot_password_request.is_expired):\n return Utils.dispatch_success(OK, ['TOKEN_VALID'])\n else:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Token Validation', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n\n def post(self, request, token):\n \"\"\"Forgot Password Password Change\"\"\"\n try:\n forgot_password_request = ForgotPasswordRequest.objects.get(token=token)\n user = forgot_password_request.user\n password = request.data.get('password')\n reenter_password = request.data.get('reenter_password')\n if password is None or reenter_password is None:\n return Utils.dispatch_failure(MISSING_PARAMETERS)\n elif password == reenter_password and (password is not '' or reenter_password is not ''):\n if forgot_password_request.is_expired:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n u = User.objects.get(username=str(user))\n u.set_password(password)\n u.save()\n forgot_password_request.is_expired = True\n forgot_password_request.save()\n else:\n return Utils.dispatch_failure(VALIDATION_MISSING)\n except MultipleObjectsReturned:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except ForgotPasswordRequest.DoesNotExist:\n return Utils.dispatch_failure(TOKEN_EXPIRED)\n except Exception as e:\n print('Forgot Password Password Change', e)\n return Utils.dispatch_failure(INTERNAL_SERVER_ERROR)\n return Utils.dispatch_success(OK, ['PASSWORD_RESET_SUCCESSFUL'])\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/ixcoin_backend/api/accounts/views.py", "source_repo": "ionixx-tech/ix_code_samples", "split": "test", "star_events_count": 0} {"blob_id": "5294186d62855fa8c7ecf8d92e689bf0d0813694", "bodies": ["super().__init__()\nself.linear1 = torch.nn.Linear(size, hidden_size)\nself.linear2 = torch.nn.Linear(hidden_size, size)\nself.normalization = normalization\nself.activation = activation\nself.dropout = torch.nn.Dropout(p=dropout_rate)\nself.hidden_dropout = torch.nn.Dropout(p=dropout_rate)\nself.reset_parameters()", "torch.nn.init.normal_(self.linear1.weight, mean=val, std=std)\ntorch.nn.init.constant_(self.linear1.bias, val)\ntorch.nn.init.normal_(self.linear2.weight, mean=val, std=std)\ntorch.nn.init.constant_(self.linear2.bias, val)", "residual = x\nx = self.hidden_dropout(self.activation(self.linear1(x)))\nx = self.dropout(self.linear2(x))\nx = x + residual\nx = self.normalization(x)\nreturn x"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.linear1 = torch.nn.Linear(size, hidden_size)\n self.linear2 = torch.nn.Linear(hidden_size, size)\n self.normalization = normalization\n self.activation = activation\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.hidden_dropout = torch.nn.Dropout(p=dropout_rate)\n self.reset_parameters()\n<|end_body_0|>\n\n<|body_start_1|>\n torch.nn.init.normal_(self.linear1.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear1.bias, val)\n torch.nn.init.normal_(self.linear2.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear2.bias, val)\n<|end_body_1|>\n\n<|body_start_2|>\n residual = x\n x = self.hidden_dropout(self.activation(self.linear1(x)))\n x = self.dropout(self.linear2(x))\n x = x + residual\n x = self.normalization(x)\n return x\n<|end_body_2|>\n", "class_docstring": "NormalizedPositionFeedForward module definition. Args: size: Input/Output size. hidden_size: Hidden size. normalization: Normalization module. activation: Activation function. dropout_rate: Dropout rate.", "class_name": "NormalizedPositionwiseFeedForward", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NormalizedPositionwiseFeedForward:\n \"\"\"NormalizedPositionFeedForward module definition. Args: size: Input/Output size. hidden_size: Hidden size. normalization: Normalization module. activation: Activation function. dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, size: int, hidden_size: int, normalization: torch.nn.Module=torch.nn.LayerNorm, activation: torch.nn.Module=torch.nn.ReLU, dropout_rate: float=0.0) -> None:\n \"\"\"Construct an NormalizedPositionwiseFeedForward object.\"\"\"\n <|body_0|>\n\n def reset_parameters(self, val: float=0.0, std: float=0.02) -> None:\n \"\"\"Reset module parameters. Args: val: Initialization value. std: Standard deviation.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute feed-forward module. Args: x: NormalizedPositionwiseFeedForward input sequences. (B, L, size) Returns: x: NormalizedPositionwiseFeedForward output sequences. (B, L, size)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.linear1 = torch.nn.Linear(size, hidden_size)\n self.linear2 = torch.nn.Linear(hidden_size, size)\n self.normalization = normalization\n self.activation = activation\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.hidden_dropout = torch.nn.Dropout(p=dropout_rate)\n self.reset_parameters()\n<|end_body_0|>\n\n<|body_start_1|>\n torch.nn.init.normal_(self.linear1.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear1.bias, val)\n torch.nn.init.normal_(self.linear2.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear2.bias, val)\n<|end_body_1|>\n\n<|body_start_2|>\n residual = x\n x = self.hidden_dropout(self.activation(self.linear1(x)))\n x = self.dropout(self.linear2(x))\n x = x + residual\n x = self.normalization(x)\n return x\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000030", "length_bytes": 2076, "license_type": "permissive", "methods": [{"docstring": "Construct an NormalizedPositionwiseFeedForward object.", "name": "__init__", "signature": "def __init__(self, size: int, hidden_size: int, normalization: torch.nn.Module=torch.nn.LayerNorm, activation: torch.nn.Module=torch.nn.ReLU, dropout_rate: float=0.0) -> None"}, {"docstring": "Reset module parameters. Args: val: Initialization value. std: Standard deviation.", "name": "reset_parameters", "signature": "def reset_parameters(self, val: float=0.0, std: float=0.02) -> None"}, {"docstring": "Compute feed-forward module. Args: x: NormalizedPositionwiseFeedForward input sequences. (B, L, size) Returns: x: NormalizedPositionwiseFeedForward output sequences. (B, L, size)", "name": "forward", "signature": "def forward(self, x: torch.Tensor) -> torch.Tensor"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_043933", "prompt": "Implement the Python class `NormalizedPositionwiseFeedForward` described below.\n\nClass description:\nNormalizedPositionFeedForward module definition. Args: size: Input/Output size. hidden_size: Hidden size. normalization: Normalization module. activation: Activation function. dropout_rate: Dropout rate.\n\nMethod signatures and docstrings:\n- def __init__(self, size: int, hidden_size: int, normalization: torch.nn.Module=torch.nn.LayerNorm, activation: torch.nn.Module=torch.nn.ReLU, dropout_rate: float=0.0) -> None: Construct an NormalizedPositionwiseFeedForward object.\n- def reset_parameters(self, val: float=0.0, std: float=0.02) -> None: Reset module parameters. Args: val: Initialization value. std: Standard deviation.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: Compute feed-forward module. Args: x: NormalizedPositionwiseFeedForward input sequences. (B, L, size) Returns: x: NormalizedPositionwiseFeedForward output sequences. (B, L, size)", "prompted_full_text": "Implement the Python class `NormalizedPositionwiseFeedForward` described below.\n\nClass description:\nNormalizedPositionFeedForward module definition. Args: size: Input/Output size. hidden_size: Hidden size. normalization: Normalization module. activation: Activation function. dropout_rate: Dropout rate.\n\nMethod signatures and docstrings:\n- def __init__(self, size: int, hidden_size: int, normalization: torch.nn.Module=torch.nn.LayerNorm, activation: torch.nn.Module=torch.nn.ReLU, dropout_rate: float=0.0) -> None: Construct an NormalizedPositionwiseFeedForward object.\n- def reset_parameters(self, val: float=0.0, std: float=0.02) -> None: Reset module parameters. Args: val: Initialization value. std: Standard deviation.\n- def forward(self, x: torch.Tensor) -> torch.Tensor: Compute feed-forward module. Args: x: NormalizedPositionwiseFeedForward input sequences. (B, L, size) Returns: x: NormalizedPositionwiseFeedForward output sequences. (B, L, size)\n\n<|skeleton|>\nclass NormalizedPositionwiseFeedForward:\n \"\"\"NormalizedPositionFeedForward module definition. Args: size: Input/Output size. hidden_size: Hidden size. normalization: Normalization module. activation: Activation function. dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, size: int, hidden_size: int, normalization: torch.nn.Module=torch.nn.LayerNorm, activation: torch.nn.Module=torch.nn.ReLU, dropout_rate: float=0.0) -> None:\n \"\"\"Construct an NormalizedPositionwiseFeedForward object.\"\"\"\n <|body_0|>\n\n def reset_parameters(self, val: float=0.0, std: float=0.02) -> None:\n \"\"\"Reset module parameters. Args: val: Initialization value. std: Standard deviation.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute feed-forward module. Args: x: NormalizedPositionwiseFeedForward input sequences. (B, L, size) Returns: x: NormalizedPositionwiseFeedForward output sequences. (B, L, size)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.linear1 = torch.nn.Linear(size, hidden_size)\n self.linear2 = torch.nn.Linear(hidden_size, size)\n self.normalization = normalization\n self.activation = activation\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.hidden_dropout = torch.nn.Dropout(p=dropout_rate)\n self.reset_parameters()\n<|end_body_0|>\n\n<|body_start_1|>\n torch.nn.init.normal_(self.linear1.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear1.bias, val)\n torch.nn.init.normal_(self.linear2.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear2.bias, val)\n<|end_body_1|>\n\n<|body_start_2|>\n residual = x\n x = self.hidden_dropout(self.activation(self.linear1(x)))\n x = self.dropout(self.linear2(x))\n x = x + residual\n x = self.normalization(x)\n return x\n<|end_body_2|>\n", "revision_id": "bcd20948db7846ee523443ef9fd78c7a1248c95e", "skeleton": "<|skeleton|>\nclass NormalizedPositionwiseFeedForward:\n \"\"\"NormalizedPositionFeedForward module definition. Args: size: Input/Output size. hidden_size: Hidden size. normalization: Normalization module. activation: Activation function. dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, size: int, hidden_size: int, normalization: torch.nn.Module=torch.nn.LayerNorm, activation: torch.nn.Module=torch.nn.ReLU, dropout_rate: float=0.0) -> None:\n \"\"\"Construct an NormalizedPositionwiseFeedForward object.\"\"\"\n <|body_0|>\n\n def reset_parameters(self, val: float=0.0, std: float=0.02) -> None:\n \"\"\"Reset module parameters. Args: val: Initialization value. std: Standard deviation.\"\"\"\n <|body_1|>\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute feed-forward module. Args: x: NormalizedPositionwiseFeedForward input sequences. (B, L, size) Returns: x: NormalizedPositionwiseFeedForward output sequences. (B, L, size)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NormalizedPositionwiseFeedForward:\n \"\"\"NormalizedPositionFeedForward module definition. Args: size: Input/Output size. hidden_size: Hidden size. normalization: Normalization module. activation: Activation function. dropout_rate: Dropout rate.\"\"\"\n\n def __init__(self, size: int, hidden_size: int, normalization: torch.nn.Module=torch.nn.LayerNorm, activation: torch.nn.Module=torch.nn.ReLU, dropout_rate: float=0.0) -> None:\n \"\"\"Construct an NormalizedPositionwiseFeedForward object.\"\"\"\n super().__init__()\n self.linear1 = torch.nn.Linear(size, hidden_size)\n self.linear2 = torch.nn.Linear(hidden_size, size)\n self.normalization = normalization\n self.activation = activation\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.hidden_dropout = torch.nn.Dropout(p=dropout_rate)\n self.reset_parameters()\n\n def reset_parameters(self, val: float=0.0, std: float=0.02) -> None:\n \"\"\"Reset module parameters. Args: val: Initialization value. std: Standard deviation.\"\"\"\n torch.nn.init.normal_(self.linear1.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear1.bias, val)\n torch.nn.init.normal_(self.linear2.weight, mean=val, std=std)\n torch.nn.init.constant_(self.linear2.bias, val)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Compute feed-forward module. Args: x: NormalizedPositionwiseFeedForward input sequences. (B, L, size) Returns: x: NormalizedPositionwiseFeedForward output sequences. (B, L, size)\"\"\"\n residual = x\n x = self.hidden_dropout(self.activation(self.linear1(x)))\n x = self.dropout(self.linear2(x))\n x = x + residual\n x = self.normalization(x)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "espnet2/asr_transducer/decoder/modules/mega/feed_forward.py", "source_repo": "espnet/espnet", "split": "test", "star_events_count": 7242} {"blob_id": "83625a72efaa8aa247254fa3c70287792d4faa5f", "bodies": ["rsync_cmd = Command('rsync', '-av')\nif dry_run:\n rsync_cmd.add_args('--dry-run')\nif re.compile('^([^@]*@)?[^:]*:').match(target) or re.compile('^([^@]*@)?[^:]*:').match(source):\n rsync_cmd.add_args('-e', 'ssh')\nif mirror:\n rsync_cmd.add_args('--delete-after')\nif prune_empty_dirs:\n rsync_cmd.add_args('-m')\nif chmod is not None:\n rsync_cmd.add_args('--chmod=%s' % chmod)\nif extra_options is not None:\n rsync_cmd.add_args(*extra_options)\nif ' ' in source:\n source = source.replace(' ', '\\\\ ')\nif ' ' in target:\n target = target.replace(' ', '\\\\ ')\nrsync_cmd.add_args(source, target)\nreturn rsync_cmd", "make_cmd = Command('make')\nif working_dir is not None:\n make_cmd.add_args('-C', working_dir)\nif nprocessors is not None:\n make_cmd.add_args('-j', nprocessors)\nif makefile is not None:\n make_cmd.add_args('-f', makefile)\nreturn make_cmd", "ssh_command = Command('ssh', '%s%s' % ('%s@' % user if user else '', server))\nssh_command.add_args(*cmd)\nreturn ssh_command", "scp_command = Command('scp')\nif recursive:\n scp_command.add_args('-r')\nscp_command.add_args(source, '%s%s:%s' % ('%s@' % user if user else '', server, target))\nreturn scp_command"], "bodies_text": "<|body_start_0|>\n rsync_cmd = Command('rsync', '-av')\n if dry_run:\n rsync_cmd.add_args('--dry-run')\n if re.compile('^([^@]*@)?[^:]*:').match(target) or re.compile('^([^@]*@)?[^:]*:').match(source):\n rsync_cmd.add_args('-e', 'ssh')\n if mirror:\n rsync_cmd.add_args('--delete-after')\n if prune_empty_dirs:\n rsync_cmd.add_args('-m')\n if chmod is not None:\n rsync_cmd.add_args('--chmod=%s' % chmod)\n if extra_options is not None:\n rsync_cmd.add_args(*extra_options)\n if ' ' in source:\n source = source.replace(' ', '\\\\ ')\n if ' ' in target:\n target = target.replace(' ', '\\\\ ')\n rsync_cmd.add_args(source, target)\n return rsync_cmd\n<|end_body_0|>\n\n<|body_start_1|>\n make_cmd = Command('make')\n if working_dir is not None:\n make_cmd.add_args('-C', working_dir)\n if nprocessors is not None:\n make_cmd.add_args('-j', nprocessors)\n if makefile is not None:\n make_cmd.add_args('-f', makefile)\n return make_cmd\n<|end_body_1|>\n\n<|body_start_2|>\n ssh_command = Command('ssh', '%s%s' % ('%s@' % user if user else '', server))\n ssh_command.add_args(*cmd)\n return ssh_command\n<|end_body_2|>\n\n<|body_start_3|>\n scp_command = Command('scp')\n if recursive:\n scp_command.add_args('-r')\n scp_command.add_args(source, '%s%s:%s' % ('%s@' % user if user else '', server, target))\n return scp_command\n<|end_body_3|>\n", "class_docstring": "General command line applications (e.g. rsync, make) Provides static methods to create Command instances for a class of 'general' command line applications: rsync make", "class_name": "general", "detected_licenses": ["AFL-3.0", "LicenseRef-scancode-unknown-license-reference", "AFL-2.1"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass general:\n \"\"\"General command line applications (e.g. rsync, make) Provides static methods to create Command instances for a class of 'general' command line applications: rsync make\"\"\"\n\n def rsync(source, target, dry_run=False, mirror=False, chmod=None, prune_empty_dirs=False, extra_options=None):\n \"\"\"Generate Command instance for 'rsync' command Create a Command instance to run the 'rsync' command line, to recursively copy/sync one directory (the 'source') into another (the 'target'). The target can be a local directory or on a remote system (in which case it should be qualified with a user and hostname i.e. 'user@hostname:target'). Arguments: source: the directory being copied/sync'ed target: the directory the source will be copied into dry_run: run rsync using --dry-run option i.e. no files will be copied/sync'ed, just reported mirror: if True then run rsync in 'mirror' mode i.e. with --delete-after option (to remove files from the target that have also been removed from the source) ch\"\"\"\n <|body_0|>\n\n def make(makefile=None, working_dir=None, nprocessors=None):\n \"\"\"Generate Command instance for 'make' command Creates a Command instance to run 'make'. Arguments: makefile: optional, name of input Makefile (-f) working_dir: optional, specify the working directory to change to (-C) nprocessors: optional, specify number of processors to use (-j) Returns: Command object.\"\"\"\n <|body_1|>\n\n def ssh_command(user, server, cmd):\n \"\"\"Generate Command instance for 'ssh' to execute a remote command Creates a Command instance to run 'ssh ... COMMAND'. Arguments: user: name of the remote user server: name of the server cmd: command to execute on the server via ssh Returns: Command object.\"\"\"\n <|body_2|>\n\n def scp(user, server, source, target, recursive=False):\n \"\"\"Generate Command instance for 'scp' Creates a Command instance to run 'scp' to copy to another system. Arguments: user: name of the remote user server: name of the server source: source file on local system target: target destination on remote system recursive: optional, if True then copy source recursively (i.e. specify the '-r' option) Returns: Command object.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rsync_cmd = Command('rsync', '-av')\n if dry_run:\n rsync_cmd.add_args('--dry-run')\n if re.compile('^([^@]*@)?[^:]*:').match(target) or re.compile('^([^@]*@)?[^:]*:').match(source):\n rsync_cmd.add_args('-e', 'ssh')\n if mirror:\n rsync_cmd.add_args('--delete-after')\n if prune_empty_dirs:\n rsync_cmd.add_args('-m')\n if chmod is not None:\n rsync_cmd.add_args('--chmod=%s' % chmod)\n if extra_options is not None:\n rsync_cmd.add_args(*extra_options)\n if ' ' in source:\n source = source.replace(' ', '\\\\ ')\n if ' ' in target:\n target = target.replace(' ', '\\\\ ')\n rsync_cmd.add_args(source, target)\n return rsync_cmd\n<|end_body_0|>\n\n<|body_start_1|>\n make_cmd = Command('make')\n if working_dir is not None:\n make_cmd.add_args('-C', working_dir)\n if nprocessors is not None:\n make_cmd.add_args('-j', nprocessors)\n if makefile is not None:\n make_cmd.add_args('-f', makefile)\n return make_cmd\n<|end_body_1|>\n\n<|body_start_2|>\n ssh_command = Command('ssh', '%s%s' % ('%s@' % user if user else '', server))\n ssh_command.add_args(*cmd)\n return ssh_command\n<|end_body_2|>\n\n<|body_start_3|>\n scp_command = Command('scp')\n if recursive:\n scp_command.add_args('-r')\n scp_command.add_args(source, '%s%s:%s' % ('%s@' % user if user else '', server, target))\n return scp_command\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000031", "length_bytes": 19588, "license_type": "permissive", "methods": [{"docstring": "Generate Command instance for 'rsync' command Create a Command instance to run the 'rsync' command line, to recursively copy/sync one directory (the 'source') into another (the 'target'). The target can be a local directory or on a remote system (in which case it should be qualified with a user and hostname i.e. 'user@hostname:target'). Arguments: source: the directory being copied/sync'ed target: the directory the source will be copied into dry_run: run rsync using --dry-run option i.e. no files will be copied/sync'ed, just reported mirror: if True then run rsync in 'mirror' mode i.e. with --delete-after option (to remove files from the target that have also been removed from the source) ch", "name": "rsync", "signature": "def rsync(source, target, dry_run=False, mirror=False, chmod=None, prune_empty_dirs=False, extra_options=None)"}, {"docstring": "Generate Command instance for 'make' command Creates a Command instance to run 'make'. Arguments: makefile: optional, name of input Makefile (-f) working_dir: optional, specify the working directory to change to (-C) nprocessors: optional, specify number of processors to use (-j) Returns: Command object.", "name": "make", "signature": "def make(makefile=None, working_dir=None, nprocessors=None)"}, {"docstring": "Generate Command instance for 'ssh' to execute a remote command Creates a Command instance to run 'ssh ... COMMAND'. Arguments: user: name of the remote user server: name of the server cmd: command to execute on the server via ssh Returns: Command object.", "name": "ssh_command", "signature": "def ssh_command(user, server, cmd)"}, {"docstring": "Generate Command instance for 'scp' Creates a Command instance to run 'scp' to copy to another system. Arguments: user: name of the remote user server: name of the server source: source file on local system target: target destination on remote system recursive: optional, if True then copy source recursively (i.e. specify the '-r' option) Returns: Command object.", "name": "scp", "signature": "def scp(user, server, source, target, recursive=False)"}], "n_methods": 4, "prompt": "Implement the Python class `general` described below.\n\nClass description:\nGeneral command line applications (e.g. rsync, make) Provides static methods to create Command instances for a class of 'general' command line applications: rsync make\n\nMethod signatures and docstrings:\n- def rsync(source, target, dry_run=False, mirror=False, chmod=None, prune_empty_dirs=False, extra_options=None): Generate Command instance for 'rsync' command Create a Command instance to run the 'rsync' command line, to recursively copy/sync one directory (the 'source') into another (the 'target'). The target can be a local directory or on a remote system (in which case it should be qualified with a user and hostname i.e. 'user@hostname:target'). Arguments: source: the directory being copied/sync'ed target: the directory the source will be copied into dry_run: run rsync using --dry-run option i.e. no files will be copied/sync'ed, just reported mirror: if True then run rsync in 'mirror' mode i.e. with --delete-after option (to remove files from the target that have also been removed from the source) ch\n- def make(makefile=None, working_dir=None, nprocessors=None): Generate Command instance for 'make' command Creates a Command instance to run 'make'. Arguments: makefile: optional, name of input Makefile (-f) working_dir: optional, specify the working directory to change to (-C) nprocessors: optional, specify number of processors to use (-j) Returns: Command object.\n- def ssh_command(user, server, cmd): Generate Command instance for 'ssh' to execute a remote command Creates a Command instance to run 'ssh ... COMMAND'. Arguments: user: name of the remote user server: name of the server cmd: command to execute on the server via ssh Returns: Command object.\n- def scp(user, server, source, target, recursive=False): Generate Command instance for 'scp' Creates a Command instance to run 'scp' to copy to another system. Arguments: user: name of the remote user server: name of the server source: source file on local system target: target destination on remote system recursive: optional, if True then copy source recursively (i.e. specify the '-r' option) Returns: Command object.", "prompted_full_text": "Implement the Python class `general` described below.\n\nClass description:\nGeneral command line applications (e.g. rsync, make) Provides static methods to create Command instances for a class of 'general' command line applications: rsync make\n\nMethod signatures and docstrings:\n- def rsync(source, target, dry_run=False, mirror=False, chmod=None, prune_empty_dirs=False, extra_options=None): Generate Command instance for 'rsync' command Create a Command instance to run the 'rsync' command line, to recursively copy/sync one directory (the 'source') into another (the 'target'). The target can be a local directory or on a remote system (in which case it should be qualified with a user and hostname i.e. 'user@hostname:target'). Arguments: source: the directory being copied/sync'ed target: the directory the source will be copied into dry_run: run rsync using --dry-run option i.e. no files will be copied/sync'ed, just reported mirror: if True then run rsync in 'mirror' mode i.e. with --delete-after option (to remove files from the target that have also been removed from the source) ch\n- def make(makefile=None, working_dir=None, nprocessors=None): Generate Command instance for 'make' command Creates a Command instance to run 'make'. Arguments: makefile: optional, name of input Makefile (-f) working_dir: optional, specify the working directory to change to (-C) nprocessors: optional, specify number of processors to use (-j) Returns: Command object.\n- def ssh_command(user, server, cmd): Generate Command instance for 'ssh' to execute a remote command Creates a Command instance to run 'ssh ... COMMAND'. Arguments: user: name of the remote user server: name of the server cmd: command to execute on the server via ssh Returns: Command object.\n- def scp(user, server, source, target, recursive=False): Generate Command instance for 'scp' Creates a Command instance to run 'scp' to copy to another system. Arguments: user: name of the remote user server: name of the server source: source file on local system target: target destination on remote system recursive: optional, if True then copy source recursively (i.e. specify the '-r' option) Returns: Command object.\n\n<|skeleton|>\nclass general:\n \"\"\"General command line applications (e.g. rsync, make) Provides static methods to create Command instances for a class of 'general' command line applications: rsync make\"\"\"\n\n def rsync(source, target, dry_run=False, mirror=False, chmod=None, prune_empty_dirs=False, extra_options=None):\n \"\"\"Generate Command instance for 'rsync' command Create a Command instance to run the 'rsync' command line, to recursively copy/sync one directory (the 'source') into another (the 'target'). The target can be a local directory or on a remote system (in which case it should be qualified with a user and hostname i.e. 'user@hostname:target'). Arguments: source: the directory being copied/sync'ed target: the directory the source will be copied into dry_run: run rsync using --dry-run option i.e. no files will be copied/sync'ed, just reported mirror: if True then run rsync in 'mirror' mode i.e. with --delete-after option (to remove files from the target that have also been removed from the source) ch\"\"\"\n <|body_0|>\n\n def make(makefile=None, working_dir=None, nprocessors=None):\n \"\"\"Generate Command instance for 'make' command Creates a Command instance to run 'make'. Arguments: makefile: optional, name of input Makefile (-f) working_dir: optional, specify the working directory to change to (-C) nprocessors: optional, specify number of processors to use (-j) Returns: Command object.\"\"\"\n <|body_1|>\n\n def ssh_command(user, server, cmd):\n \"\"\"Generate Command instance for 'ssh' to execute a remote command Creates a Command instance to run 'ssh ... COMMAND'. Arguments: user: name of the remote user server: name of the server cmd: command to execute on the server via ssh Returns: Command object.\"\"\"\n <|body_2|>\n\n def scp(user, server, source, target, recursive=False):\n \"\"\"Generate Command instance for 'scp' Creates a Command instance to run 'scp' to copy to another system. Arguments: user: name of the remote user server: name of the server source: source file on local system target: target destination on remote system recursive: optional, if True then copy source recursively (i.e. specify the '-r' option) Returns: Command object.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rsync_cmd = Command('rsync', '-av')\n if dry_run:\n rsync_cmd.add_args('--dry-run')\n if re.compile('^([^@]*@)?[^:]*:').match(target) or re.compile('^([^@]*@)?[^:]*:').match(source):\n rsync_cmd.add_args('-e', 'ssh')\n if mirror:\n rsync_cmd.add_args('--delete-after')\n if prune_empty_dirs:\n rsync_cmd.add_args('-m')\n if chmod is not None:\n rsync_cmd.add_args('--chmod=%s' % chmod)\n if extra_options is not None:\n rsync_cmd.add_args(*extra_options)\n if ' ' in source:\n source = source.replace(' ', '\\\\ ')\n if ' ' in target:\n target = target.replace(' ', '\\\\ ')\n rsync_cmd.add_args(source, target)\n return rsync_cmd\n<|end_body_0|>\n\n<|body_start_1|>\n make_cmd = Command('make')\n if working_dir is not None:\n make_cmd.add_args('-C', working_dir)\n if nprocessors is not None:\n make_cmd.add_args('-j', nprocessors)\n if makefile is not None:\n make_cmd.add_args('-f', makefile)\n return make_cmd\n<|end_body_1|>\n\n<|body_start_2|>\n ssh_command = Command('ssh', '%s%s' % ('%s@' % user if user else '', server))\n ssh_command.add_args(*cmd)\n return ssh_command\n<|end_body_2|>\n\n<|body_start_3|>\n scp_command = Command('scp')\n if recursive:\n scp_command.add_args('-r')\n scp_command.add_args(source, '%s%s:%s' % ('%s@' % user if user else '', server, target))\n return scp_command\n<|end_body_3|>\n", "revision_id": "d3da61b858560d87ff070c34d906ec12717bd81d", "skeleton": "<|skeleton|>\nclass general:\n \"\"\"General command line applications (e.g. rsync, make) Provides static methods to create Command instances for a class of 'general' command line applications: rsync make\"\"\"\n\n def rsync(source, target, dry_run=False, mirror=False, chmod=None, prune_empty_dirs=False, extra_options=None):\n \"\"\"Generate Command instance for 'rsync' command Create a Command instance to run the 'rsync' command line, to recursively copy/sync one directory (the 'source') into another (the 'target'). The target can be a local directory or on a remote system (in which case it should be qualified with a user and hostname i.e. 'user@hostname:target'). Arguments: source: the directory being copied/sync'ed target: the directory the source will be copied into dry_run: run rsync using --dry-run option i.e. no files will be copied/sync'ed, just reported mirror: if True then run rsync in 'mirror' mode i.e. with --delete-after option (to remove files from the target that have also been removed from the source) ch\"\"\"\n <|body_0|>\n\n def make(makefile=None, working_dir=None, nprocessors=None):\n \"\"\"Generate Command instance for 'make' command Creates a Command instance to run 'make'. Arguments: makefile: optional, name of input Makefile (-f) working_dir: optional, specify the working directory to change to (-C) nprocessors: optional, specify number of processors to use (-j) Returns: Command object.\"\"\"\n <|body_1|>\n\n def ssh_command(user, server, cmd):\n \"\"\"Generate Command instance for 'ssh' to execute a remote command Creates a Command instance to run 'ssh ... COMMAND'. Arguments: user: name of the remote user server: name of the server cmd: command to execute on the server via ssh Returns: Command object.\"\"\"\n <|body_2|>\n\n def scp(user, server, source, target, recursive=False):\n \"\"\"Generate Command instance for 'scp' Creates a Command instance to run 'scp' to copy to another system. Arguments: user: name of the remote user server: name of the server source: source file on local system target: target destination on remote system recursive: optional, if True then copy source recursively (i.e. specify the '-r' option) Returns: Command object.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class general:\n \"\"\"General command line applications (e.g. rsync, make) Provides static methods to create Command instances for a class of 'general' command line applications: rsync make\"\"\"\n\n def rsync(source, target, dry_run=False, mirror=False, chmod=None, prune_empty_dirs=False, extra_options=None):\n \"\"\"Generate Command instance for 'rsync' command Create a Command instance to run the 'rsync' command line, to recursively copy/sync one directory (the 'source') into another (the 'target'). The target can be a local directory or on a remote system (in which case it should be qualified with a user and hostname i.e. 'user@hostname:target'). Arguments: source: the directory being copied/sync'ed target: the directory the source will be copied into dry_run: run rsync using --dry-run option i.e. no files will be copied/sync'ed, just reported mirror: if True then run rsync in 'mirror' mode i.e. with --delete-after option (to remove files from the target that have also been removed from the source) ch\"\"\"\n rsync_cmd = Command('rsync', '-av')\n if dry_run:\n rsync_cmd.add_args('--dry-run')\n if re.compile('^([^@]*@)?[^:]*:').match(target) or re.compile('^([^@]*@)?[^:]*:').match(source):\n rsync_cmd.add_args('-e', 'ssh')\n if mirror:\n rsync_cmd.add_args('--delete-after')\n if prune_empty_dirs:\n rsync_cmd.add_args('-m')\n if chmod is not None:\n rsync_cmd.add_args('--chmod=%s' % chmod)\n if extra_options is not None:\n rsync_cmd.add_args(*extra_options)\n if ' ' in source:\n source = source.replace(' ', '\\\\ ')\n if ' ' in target:\n target = target.replace(' ', '\\\\ ')\n rsync_cmd.add_args(source, target)\n return rsync_cmd\n\n def make(makefile=None, working_dir=None, nprocessors=None):\n \"\"\"Generate Command instance for 'make' command Creates a Command instance to run 'make'. Arguments: makefile: optional, name of input Makefile (-f) working_dir: optional, specify the working directory to change to (-C) nprocessors: optional, specify number of processors to use (-j) Returns: Command object.\"\"\"\n make_cmd = Command('make')\n if working_dir is not None:\n make_cmd.add_args('-C', working_dir)\n if nprocessors is not None:\n make_cmd.add_args('-j', nprocessors)\n if makefile is not None:\n make_cmd.add_args('-f', makefile)\n return make_cmd\n\n def ssh_command(user, server, cmd):\n \"\"\"Generate Command instance for 'ssh' to execute a remote command Creates a Command instance to run 'ssh ... COMMAND'. Arguments: user: name of the remote user server: name of the server cmd: command to execute on the server via ssh Returns: Command object.\"\"\"\n ssh_command = Command('ssh', '%s%s' % ('%s@' % user if user else '', server))\n ssh_command.add_args(*cmd)\n return ssh_command\n\n def scp(user, server, source, target, recursive=False):\n \"\"\"Generate Command instance for 'scp' Creates a Command instance to run 'scp' to copy to another system. Arguments: user: name of the remote user server: name of the server source: source file on local system target: target destination on remote system recursive: optional, if True then copy source recursively (i.e. specify the '-r' option) Returns: Command object.\"\"\"\n scp_command = Command('scp')\n if recursive:\n scp_command.add_args('-r')\n scp_command.add_args(source, '%s%s:%s' % ('%s@' % user if user else '', server, target))\n return scp_command\n", "source": "the_stack_v2_python_sparse", "source_path": "auto_process_ngs/applications.py", "source_repo": "fls-bioinformatics-core/auto_process_ngs", "split": "test", "star_events_count": 8} {"blob_id": "e5c2fb609392b15a4486716337611725b3994688", "bodies": ["while True:\n await instance.write(value=device.x)\n await async_lib.library.sleep(self.dt.value)", "while True:\n from datetime import datetime\n date_time = datetime.fromtimestamp(device.t)\n t = date_time.strftime('%m/%d/%Y, %H:%M:%S')\n await instance.write(value=t)\n await async_lib.library.sleep(self.dt.value)"], "bodies_text": "<|body_start_0|>\n while True:\n await instance.write(value=device.x)\n await async_lib.library.sleep(self.dt.value)\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n from datetime import datetime\n date_time = datetime.fromtimestamp(device.t)\n t = date_time.strftime('%m/%d/%Y, %H:%M:%S')\n await instance.write(value=t)\n await async_lib.library.sleep(self.dt.value)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RandomWalkIO", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RandomWalkIO:\n\n async def x(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n <|body_0|>\n\n async def t(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while True:\n await instance.write(value=device.x)\n await async_lib.library.sleep(self.dt.value)\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n from datetime import datetime\n date_time = datetime.fromtimestamp(device.t)\n t = date_time.strftime('%m/%d/%Y, %H:%M:%S')\n await instance.write(value=t)\n await async_lib.library.sleep(self.dt.value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000032", "length_bytes": 2542, "license_type": "permissive", "methods": [{"docstring": "Periodically update the value", "name": "x", "signature": "async def x(self, instance, async_lib)"}, {"docstring": "Periodically update the value", "name": "t", "signature": "async def t(self, instance, async_lib)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037196", "prompt": "Implement the Python class `RandomWalkIO` described below.\n\nClass description:\nImplement the RandomWalkIO class.\n\nMethod signatures and docstrings:\n- async def x(self, instance, async_lib): Periodically update the value\n- async def t(self, instance, async_lib): Periodically update the value", "prompted_full_text": "Implement the Python class `RandomWalkIO` described below.\n\nClass description:\nImplement the RandomWalkIO class.\n\nMethod signatures and docstrings:\n- async def x(self, instance, async_lib): Periodically update the value\n- async def t(self, instance, async_lib): Periodically update the value\n\n<|skeleton|>\nclass RandomWalkIO:\n\n async def x(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n <|body_0|>\n\n async def t(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n while True:\n await instance.write(value=device.x)\n await async_lib.library.sleep(self.dt.value)\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n from datetime import datetime\n date_time = datetime.fromtimestamp(device.t)\n t = date_time.strftime('%m/%d/%Y, %H:%M:%S')\n await instance.write(value=t)\n await async_lib.library.sleep(self.dt.value)\n<|end_body_1|>\n", "revision_id": "89601b263a55dda94a11ca9e570873c94a840fa7", "skeleton": "<|skeleton|>\nclass RandomWalkIO:\n\n async def x(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n <|body_0|>\n\n async def t(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RandomWalkIO:\n async def x(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n while True:\n await instance.write(value=device.x)\n await async_lib.library.sleep(self.dt.value)\n\n async def t(self, instance, async_lib):\n \"\"\"Periodically update the value\"\"\"\n while True:\n from datetime import datetime\n date_time = datetime.fromtimestamp(device.t)\n t = date_time.strftime('%m/%d/%Y, %H:%M:%S')\n await instance.write(value=t)\n await async_lib.library.sleep(self.dt.value)\n", "source": "the_stack_v2_python_sparse", "source_path": "caproto_sandbox/random_walk_modified.py", "source_repo": "vstadnytskyi/caproto-sandbox", "split": "test", "star_events_count": 0} {"blob_id": "b5bfd2d7071f2a010750ef63eec39e40dd5c553e", "bodies": ["try:\n doc = Document.objects.get(id=doc_id)\nexcept Document.DoesNotExist:\n raise Http404('Document does not exists')\npage_nums = request.GET.getlist('pages[]')\npage_nums = [int(number) for number in page_nums]\ndoc.delete_pages(page_numbers=page_nums)\nreturn Response(status=status.HTTP_204_NO_CONTENT)", "try:\n doc = Document.objects.get(id=doc_id)\nexcept Document.DoesNotExist:\n raise Http404('Document does not exists')\ndoc.reorder_pages(request.data)\nreturn Response(status=status.HTTP_204_NO_CONTENT)"], "bodies_text": "<|body_start_0|>\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n page_nums = request.GET.getlist('pages[]')\n page_nums = [int(number) for number in page_nums]\n doc.delete_pages(page_numbers=page_nums)\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n doc.reorder_pages(request.data)\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "PagesView", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PagesView:\n\n def delete(self, request, doc_id):\n \"\"\"Deletes Pages from doc_id document\"\"\"\n <|body_0|>\n\n def post(self, request, doc_id):\n \"\"\"Reorders pages from doc_id document\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n page_nums = request.GET.getlist('pages[]')\n page_nums = [int(number) for number in page_nums]\n doc.delete_pages(page_numbers=page_nums)\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n doc.reorder_pages(request.data)\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000033", "length_bytes": 3021, "license_type": "permissive", "methods": [{"docstring": "Deletes Pages from doc_id document", "name": "delete", "signature": "def delete(self, request, doc_id)"}, {"docstring": "Reorders pages from doc_id document", "name": "post", "signature": "def post(self, request, doc_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_011114", "prompt": "Implement the Python class `PagesView` described below.\n\nClass description:\nImplement the PagesView class.\n\nMethod signatures and docstrings:\n- def delete(self, request, doc_id): Deletes Pages from doc_id document\n- def post(self, request, doc_id): Reorders pages from doc_id document", "prompted_full_text": "Implement the Python class `PagesView` described below.\n\nClass description:\nImplement the PagesView class.\n\nMethod signatures and docstrings:\n- def delete(self, request, doc_id): Deletes Pages from doc_id document\n- def post(self, request, doc_id): Reorders pages from doc_id document\n\n<|skeleton|>\nclass PagesView:\n\n def delete(self, request, doc_id):\n \"\"\"Deletes Pages from doc_id document\"\"\"\n <|body_0|>\n\n def post(self, request, doc_id):\n \"\"\"Reorders pages from doc_id document\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n page_nums = request.GET.getlist('pages[]')\n page_nums = [int(number) for number in page_nums]\n doc.delete_pages(page_numbers=page_nums)\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n doc.reorder_pages(request.data)\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_1|>\n", "revision_id": "d177f1af331214e0f62407624e7029ce4953bd9b", "skeleton": "<|skeleton|>\nclass PagesView:\n\n def delete(self, request, doc_id):\n \"\"\"Deletes Pages from doc_id document\"\"\"\n <|body_0|>\n\n def post(self, request, doc_id):\n \"\"\"Reorders pages from doc_id document\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PagesView:\n def delete(self, request, doc_id):\n \"\"\"Deletes Pages from doc_id document\"\"\"\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n page_nums = request.GET.getlist('pages[]')\n page_nums = [int(number) for number in page_nums]\n doc.delete_pages(page_numbers=page_nums)\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def post(self, request, doc_id):\n \"\"\"Reorders pages from doc_id document\"\"\"\n try:\n doc = Document.objects.get(id=doc_id)\n except Document.DoesNotExist:\n raise Http404('Document does not exists')\n doc.reorder_pages(request.data)\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "source": "the_stack_v2_python_sparse", "source_path": "papermerge/core/views/api.py", "source_repo": "ebdavison/papermerge", "split": "test", "star_events_count": 0} {"blob_id": "763ef10f4ff65d3949f6b6c54dd3afaf4695e5fa", "bodies": ["permissions = list(self.own_permissions.all())\nfor parent in self.parents.all():\n permissions += list(parent.permissions.all())\nself.permissions.set(permissions)\nfor sub in self.sub_groups.all():\n sub.update_permissions()", "subs = self.sub_groups.all()\nfor sub in self.sub_groups.all():\n subs = subs.union(sub.get_sub_groups())\nreturn subs", "parents = self.parents.all()\nfor parent in self.parents.all():\n parents = parents.union(parent.get_all_parents())\nreturn parents", "parents = InheritanceGroup.objects.exclude(pk=self.pk)\nfor sub in self.get_sub_groups():\n parents = parents.exclude(pk=sub.pk)\nreturn parents"], "bodies_text": "<|body_start_0|>\n permissions = list(self.own_permissions.all())\n for parent in self.parents.all():\n permissions += list(parent.permissions.all())\n self.permissions.set(permissions)\n for sub in self.sub_groups.all():\n sub.update_permissions()\n<|end_body_0|>\n\n<|body_start_1|>\n subs = self.sub_groups.all()\n for sub in self.sub_groups.all():\n subs = subs.union(sub.get_sub_groups())\n return subs\n<|end_body_1|>\n\n<|body_start_2|>\n parents = self.parents.all()\n for parent in self.parents.all():\n parents = parents.union(parent.get_all_parents())\n return parents\n<|end_body_2|>\n\n<|body_start_3|>\n parents = InheritanceGroup.objects.exclude(pk=self.pk)\n for sub in self.get_sub_groups():\n parents = parents.exclude(pk=sub.pk)\n return parents\n<|end_body_3|>\n", "class_docstring": "A group that allow inheritance of permissions. The groups that a group will inherit from is given by the `parents` field. The permissions that this group has independently from its parents are given by the `own_permissions` field. The standard `permissions` field will contain the groups own permissions, and those it has inherited. This field should not be altered, as any change will get overwritten.", "class_name": "InheritanceGroup", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InheritanceGroup:\n \"\"\"A group that allow inheritance of permissions. The groups that a group will inherit from is given by the `parents` field. The permissions that this group has independently from its parents are given by the `own_permissions` field. The standard `permissions` field will contain the groups own permissions, and those it has inherited. This field should not be altered, as any change will get overwritten.\"\"\"\n\n def update_permissions(self):\n \"\"\"Update the permissions of this and all sub groups.\"\"\"\n <|body_0|>\n\n def get_sub_groups(self):\n \"\"\"Return a queryset of all groups that inherits from this group.\"\"\"\n <|body_1|>\n\n def get_all_parents(self):\n \"\"\"Return a queryset of all groups that this group inherits from.\"\"\"\n <|body_2|>\n\n def get_available_parents(self):\n \"\"\"Return a queryset of all groups that can be a parent to this group. This excludes any group that inherits from this group, as that would cause a circular dependency.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n permissions = list(self.own_permissions.all())\n for parent in self.parents.all():\n permissions += list(parent.permissions.all())\n self.permissions.set(permissions)\n for sub in self.sub_groups.all():\n sub.update_permissions()\n<|end_body_0|>\n\n<|body_start_1|>\n subs = self.sub_groups.all()\n for sub in self.sub_groups.all():\n subs = subs.union(sub.get_sub_groups())\n return subs\n<|end_body_1|>\n\n<|body_start_2|>\n parents = self.parents.all()\n for parent in self.parents.all():\n parents = parents.union(parent.get_all_parents())\n return parents\n<|end_body_2|>\n\n<|body_start_3|>\n parents = InheritanceGroup.objects.exclude(pk=self.pk)\n for sub in self.get_sub_groups():\n parents = parents.exclude(pk=sub.pk)\n return parents\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000034", "length_bytes": 16403, "license_type": "no_license", "methods": [{"docstring": "Update the permissions of this and all sub groups.", "name": "update_permissions", "signature": "def update_permissions(self)"}, {"docstring": "Return a queryset of all groups that inherits from this group.", "name": "get_sub_groups", "signature": "def get_sub_groups(self)"}, {"docstring": "Return a queryset of all groups that this group inherits from.", "name": "get_all_parents", "signature": "def get_all_parents(self)"}, {"docstring": "Return a queryset of all groups that can be a parent to this group. This excludes any group that inherits from this group, as that would cause a circular dependency.", "name": "get_available_parents", "signature": "def get_available_parents(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_022241", "prompt": "Implement the Python class `InheritanceGroup` described below.\n\nClass description:\nA group that allow inheritance of permissions. The groups that a group will inherit from is given by the `parents` field. The permissions that this group has independently from its parents are given by the `own_permissions` field. The standard `permissions` field will contain the groups own permissions, and those it has inherited. This field should not be altered, as any change will get overwritten.\n\nMethod signatures and docstrings:\n- def update_permissions(self): Update the permissions of this and all sub groups.\n- def get_sub_groups(self): Return a queryset of all groups that inherits from this group.\n- def get_all_parents(self): Return a queryset of all groups that this group inherits from.\n- def get_available_parents(self): Return a queryset of all groups that can be a parent to this group. This excludes any group that inherits from this group, as that would cause a circular dependency.", "prompted_full_text": "Implement the Python class `InheritanceGroup` described below.\n\nClass description:\nA group that allow inheritance of permissions. The groups that a group will inherit from is given by the `parents` field. The permissions that this group has independently from its parents are given by the `own_permissions` field. The standard `permissions` field will contain the groups own permissions, and those it has inherited. This field should not be altered, as any change will get overwritten.\n\nMethod signatures and docstrings:\n- def update_permissions(self): Update the permissions of this and all sub groups.\n- def get_sub_groups(self): Return a queryset of all groups that inherits from this group.\n- def get_all_parents(self): Return a queryset of all groups that this group inherits from.\n- def get_available_parents(self): Return a queryset of all groups that can be a parent to this group. This excludes any group that inherits from this group, as that would cause a circular dependency.\n\n<|skeleton|>\nclass InheritanceGroup:\n \"\"\"A group that allow inheritance of permissions. The groups that a group will inherit from is given by the `parents` field. The permissions that this group has independently from its parents are given by the `own_permissions` field. The standard `permissions` field will contain the groups own permissions, and those it has inherited. This field should not be altered, as any change will get overwritten.\"\"\"\n\n def update_permissions(self):\n \"\"\"Update the permissions of this and all sub groups.\"\"\"\n <|body_0|>\n\n def get_sub_groups(self):\n \"\"\"Return a queryset of all groups that inherits from this group.\"\"\"\n <|body_1|>\n\n def get_all_parents(self):\n \"\"\"Return a queryset of all groups that this group inherits from.\"\"\"\n <|body_2|>\n\n def get_available_parents(self):\n \"\"\"Return a queryset of all groups that can be a parent to this group. This excludes any group that inherits from this group, as that would cause a circular dependency.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n permissions = list(self.own_permissions.all())\n for parent in self.parents.all():\n permissions += list(parent.permissions.all())\n self.permissions.set(permissions)\n for sub in self.sub_groups.all():\n sub.update_permissions()\n<|end_body_0|>\n\n<|body_start_1|>\n subs = self.sub_groups.all()\n for sub in self.sub_groups.all():\n subs = subs.union(sub.get_sub_groups())\n return subs\n<|end_body_1|>\n\n<|body_start_2|>\n parents = self.parents.all()\n for parent in self.parents.all():\n parents = parents.union(parent.get_all_parents())\n return parents\n<|end_body_2|>\n\n<|body_start_3|>\n parents = InheritanceGroup.objects.exclude(pk=self.pk)\n for sub in self.get_sub_groups():\n parents = parents.exclude(pk=sub.pk)\n return parents\n<|end_body_3|>\n", "revision_id": "708071d144b06ab289abdea6046437c40a81d230", "skeleton": "<|skeleton|>\nclass InheritanceGroup:\n \"\"\"A group that allow inheritance of permissions. The groups that a group will inherit from is given by the `parents` field. The permissions that this group has independently from its parents are given by the `own_permissions` field. The standard `permissions` field will contain the groups own permissions, and those it has inherited. This field should not be altered, as any change will get overwritten.\"\"\"\n\n def update_permissions(self):\n \"\"\"Update the permissions of this and all sub groups.\"\"\"\n <|body_0|>\n\n def get_sub_groups(self):\n \"\"\"Return a queryset of all groups that inherits from this group.\"\"\"\n <|body_1|>\n\n def get_all_parents(self):\n \"\"\"Return a queryset of all groups that this group inherits from.\"\"\"\n <|body_2|>\n\n def get_available_parents(self):\n \"\"\"Return a queryset of all groups that can be a parent to this group. This excludes any group that inherits from this group, as that would cause a circular dependency.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InheritanceGroup:\n \"\"\"A group that allow inheritance of permissions. The groups that a group will inherit from is given by the `parents` field. The permissions that this group has independently from its parents are given by the `own_permissions` field. The standard `permissions` field will contain the groups own permissions, and those it has inherited. This field should not be altered, as any change will get overwritten.\"\"\"\n\n def update_permissions(self):\n \"\"\"Update the permissions of this and all sub groups.\"\"\"\n permissions = list(self.own_permissions.all())\n for parent in self.parents.all():\n permissions += list(parent.permissions.all())\n self.permissions.set(permissions)\n for sub in self.sub_groups.all():\n sub.update_permissions()\n\n def get_sub_groups(self):\n \"\"\"Return a queryset of all groups that inherits from this group.\"\"\"\n subs = self.sub_groups.all()\n for sub in self.sub_groups.all():\n subs = subs.union(sub.get_sub_groups())\n return subs\n\n def get_all_parents(self):\n \"\"\"Return a queryset of all groups that this group inherits from.\"\"\"\n parents = self.parents.all()\n for parent in self.parents.all():\n parents = parents.union(parent.get_all_parents())\n return parents\n\n def get_available_parents(self):\n \"\"\"Return a queryset of all groups that can be a parent to this group. This excludes any group that inherits from this group, as that would cause a circular dependency.\"\"\"\n parents = InheritanceGroup.objects.exclude(pk=self.pk)\n for sub in self.get_sub_groups():\n parents = parents.exclude(pk=sub.pk)\n return parents\n", "source": "the_stack_v2_python_sparse", "source_path": "members/models.py", "source_repo": "kalkins/buk-django", "split": "test", "star_events_count": 4} {"blob_id": "4a7cbccfaecbf79d0097a98bff3c15ba0e92f7f9", "bodies": ["Editeur.__init__(self, pere, objet, attribut)\nself.ajouter_option('b', self.opt_descendre_membre)\nself.ajouter_option('d', self.opt_suppr_membre)\nself.ajouter_option('h', self.opt_remonter_membre)", "squelette = self.objet\nmsg = '| |tit|' + 'Edition des membres de {}'.format(squelette.cle).ljust(76)\nmsg += '|ff||\\n' + self.opts.separateur + '\\n'\nmsg += '\\nOptions supportées :'\nmsg += '\\n |cmd|/b |ff| pour descendre le membre'\nmsg += '\\n |cmd|/d |ff| pour supprimer le membre'\nmsg += '\\n |cmd|/h |ff| pour remonter le membre\\n'\nmsg += self.aide_courte\nmsg += 'Membres courants :\\n'\nmembres = squelette.membres\nliste_membres = ''\nfor membre in membres:\n ligne = '\\n |ent|' + membre.nom.ljust(10) + '|ff|'\n liste_membres += ligne\nif not liste_membres:\n liste_membres += \"\\n |att|Aucun membre pour l'instant.|ff|\"\nmsg += liste_membres\nreturn msg", "squelette = self.objet\ntry:\n membre = squelette.get_membre(arguments)\nexcept KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\nelse:\n squelette.descendre_membre(membre.nom)\n self.actualiser()", "squelette = self.objet\ntry:\n membre = squelette.get_membre(arguments)\nexcept KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\nelse:\n squelette.supprimer_membre(membre.nom)\n self.actualiser()", "squelette = self.objet\ntry:\n membre = squelette.get_membre(arguments)\nexcept KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\nelse:\n squelette.remonter_membre(membre.nom)\n self.actualiser()", "squelette = self.objet\nmembres = squelette.membres\nnom = supprimer_accents(msg).lower()\ntry:\n membre = squelette.get_membre(nom)\nexcept KeyError:\n try:\n membre = squelette.ajouter_membre(msg)\n except ValueError:\n self.pere << '|err|Ce nom de membre est invalide.|ff|'\n return\nenveloppe = EnveloppeObjet(EdtMembre, membre, None)\nenveloppe.parent = self\nenveloppe.aide_courte = \"Entrez |ent|/|ff| pour revenir à la fenêtre parente.\\nOptions :\\n - |ent|/n |ff| : modifie le nom du membre\\n - |ent|/g |ff| : permet de regrouper plusieurs membres\\n - |ent|/p |ff| : change la probabilité de toucher le membre en combat\\n - |ent|/f |ff| : change l'état d'un flag\\n\"\ncontexte = enveloppe.construire(self.pere)\nself.migrer_contexte(contexte)"], "bodies_text": "<|body_start_0|>\n Editeur.__init__(self, pere, objet, attribut)\n self.ajouter_option('b', self.opt_descendre_membre)\n self.ajouter_option('d', self.opt_suppr_membre)\n self.ajouter_option('h', self.opt_remonter_membre)\n<|end_body_0|>\n\n<|body_start_1|>\n squelette = self.objet\n msg = '| |tit|' + 'Edition des membres de {}'.format(squelette.cle).ljust(76)\n msg += '|ff||\\n' + self.opts.separateur + '\\n'\n msg += '\\nOptions supportées :'\n msg += '\\n |cmd|/b |ff| pour descendre le membre'\n msg += '\\n |cmd|/d |ff| pour supprimer le membre'\n msg += '\\n |cmd|/h |ff| pour remonter le membre\\n'\n msg += self.aide_courte\n msg += 'Membres courants :\\n'\n membres = squelette.membres\n liste_membres = ''\n for membre in membres:\n ligne = '\\n |ent|' + membre.nom.ljust(10) + '|ff|'\n liste_membres += ligne\n if not liste_membres:\n liste_membres += \"\\n |att|Aucun membre pour l'instant.|ff|\"\n msg += liste_membres\n return msg\n<|end_body_1|>\n\n<|body_start_2|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.descendre_membre(membre.nom)\n self.actualiser()\n<|end_body_2|>\n\n<|body_start_3|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.supprimer_membre(membre.nom)\n self.actualiser()\n<|end_body_3|>\n\n<|body_start_4|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.remonter_membre(membre.nom)\n self.actualiser()\n<|end_body_4|>\n\n<|body_start_5|>\n squelette = self.objet\n membres = squelette.membres\n nom = supprimer_accents(msg).lower()\n try:\n membre = squelette.get_membre(nom)\n except KeyError:\n try:\n membre = squelette.ajouter_membre(msg)\n except ValueError:\n self.pere << '|err|Ce nom de membre est invalide.|ff|'\n return\n enveloppe = EnveloppeObjet(EdtMembre, membre, None)\n enveloppe.parent = self\n enveloppe.aide_courte = \"Entrez |ent|/|ff| pour revenir à la fenêtre parente.\\nOptions :\\n - |ent|/n |ff| : modifie le nom du membre\\n - |ent|/g |ff| : permet de regrouper plusieurs membres\\n - |ent|/p |ff| : change la probabilité de toucher le membre en combat\\n - |ent|/f |ff| : change l'état d'un flag\\n\"\n contexte = enveloppe.construire(self.pere)\n self.migrer_contexte(contexte)\n<|end_body_5|>\n", "class_docstring": "Contexte-éditeur d'édition des membres.", "class_name": "EdtMembres", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EdtMembres:\n \"\"\"Contexte-éditeur d'édition des membres.\"\"\"\n\n def __init__(self, pere, objet=None, attribut=None):\n \"\"\"Constructeur de l'éditeur\"\"\"\n <|body_0|>\n\n def accueil(self):\n \"\"\"Message d'accueil du contexte\"\"\"\n <|body_1|>\n\n def opt_descendre_membre(self, arguments):\n \"\"\"Descend un membre dans la liste. Syntaxe : /b \"\"\"\n <|body_2|>\n\n def opt_suppr_membre(self, arguments):\n \"\"\"Supprime un membre Syntaxe : /d \"\"\"\n <|body_3|>\n\n def opt_remonter_membre(self, arguments):\n \"\"\"Remonte un membre dans la liste. Syntaxe : /h \"\"\"\n <|body_4|>\n\n def interpreter(self, msg):\n \"\"\"Interprétation de la présentation\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Editeur.__init__(self, pere, objet, attribut)\n self.ajouter_option('b', self.opt_descendre_membre)\n self.ajouter_option('d', self.opt_suppr_membre)\n self.ajouter_option('h', self.opt_remonter_membre)\n<|end_body_0|>\n\n<|body_start_1|>\n squelette = self.objet\n msg = '| |tit|' + 'Edition des membres de {}'.format(squelette.cle).ljust(76)\n msg += '|ff||\\n' + self.opts.separateur + '\\n'\n msg += '\\nOptions supportées :'\n msg += '\\n |cmd|/b |ff| pour descendre le membre'\n msg += '\\n |cmd|/d |ff| pour supprimer le membre'\n msg += '\\n |cmd|/h |ff| pour remonter le membre\\n'\n msg += self.aide_courte\n msg += 'Membres courants :\\n'\n membres = squelette.membres\n liste_membres = ''\n for membre in membres:\n ligne = '\\n |ent|' + membre.nom.ljust(10) + '|ff|'\n liste_membres += ligne\n if not liste_membres:\n liste_membres += \"\\n |att|Aucun membre pour l'instant.|ff|\"\n msg += liste_membres\n return msg\n<|end_body_1|>\n\n<|body_start_2|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.descendre_membre(membre.nom)\n self.actualiser()\n<|end_body_2|>\n\n<|body_start_3|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.supprimer_membre(membre.nom)\n self.actualiser()\n<|end_body_3|>\n\n<|body_start_4|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.remonter_membre(membre.nom)\n self.actualiser()\n<|end_body_4|>\n\n<|body_start_5|>\n squelette = self.objet\n membres = squelette.membres\n nom = supprimer_accents(msg).lower()\n try:\n membre = squelette.get_membre(nom)\n except KeyError:\n try:\n membre = squelette.ajouter_membre(msg)\n except ValueError:\n self.pere << '|err|Ce nom de membre est invalide.|ff|'\n return\n enveloppe = EnveloppeObjet(EdtMembre, membre, None)\n enveloppe.parent = self\n enveloppe.aide_courte = \"Entrez |ent|/|ff| pour revenir à la fenêtre parente.\\nOptions :\\n - |ent|/n |ff| : modifie le nom du membre\\n - |ent|/g |ff| : permet de regrouper plusieurs membres\\n - |ent|/p |ff| : change la probabilité de toucher le membre en combat\\n - |ent|/f |ff| : change l'état d'un flag\\n\"\n contexte = enveloppe.construire(self.pere)\n self.migrer_contexte(contexte)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000035", "length_bytes": 5617, "license_type": "permissive", "methods": [{"docstring": "Constructeur de l'éditeur", "name": "__init__", "signature": "def __init__(self, pere, objet=None, attribut=None)"}, {"docstring": "Message d'accueil du contexte", "name": "accueil", "signature": "def accueil(self)"}, {"docstring": "Descend un membre dans la liste. Syntaxe : /b ", "name": "opt_descendre_membre", "signature": "def opt_descendre_membre(self, arguments)"}, {"docstring": "Supprime un membre Syntaxe : /d ", "name": "opt_suppr_membre", "signature": "def opt_suppr_membre(self, arguments)"}, {"docstring": "Remonte un membre dans la liste. Syntaxe : /h ", "name": "opt_remonter_membre", "signature": "def opt_remonter_membre(self, arguments)"}, {"docstring": "Interprétation de la présentation", "name": "interpreter", "signature": "def interpreter(self, msg)"}], "n_methods": 6, "prompt": "Implement the Python class `EdtMembres` described below.\n\nClass description:\nContexte-éditeur d'édition des membres.\n\nMethod signatures and docstrings:\n- def __init__(self, pere, objet=None, attribut=None): Constructeur de l'éditeur\n- def accueil(self): Message d'accueil du contexte\n- def opt_descendre_membre(self, arguments): Descend un membre dans la liste. Syntaxe : /b \n- def opt_suppr_membre(self, arguments): Supprime un membre Syntaxe : /d \n- def opt_remonter_membre(self, arguments): Remonte un membre dans la liste. Syntaxe : /h \n- def interpreter(self, msg): Interprétation de la présentation", "prompted_full_text": "Implement the Python class `EdtMembres` described below.\n\nClass description:\nContexte-éditeur d'édition des membres.\n\nMethod signatures and docstrings:\n- def __init__(self, pere, objet=None, attribut=None): Constructeur de l'éditeur\n- def accueil(self): Message d'accueil du contexte\n- def opt_descendre_membre(self, arguments): Descend un membre dans la liste. Syntaxe : /b \n- def opt_suppr_membre(self, arguments): Supprime un membre Syntaxe : /d \n- def opt_remonter_membre(self, arguments): Remonte un membre dans la liste. Syntaxe : /h \n- def interpreter(self, msg): Interprétation de la présentation\n\n<|skeleton|>\nclass EdtMembres:\n \"\"\"Contexte-éditeur d'édition des membres.\"\"\"\n\n def __init__(self, pere, objet=None, attribut=None):\n \"\"\"Constructeur de l'éditeur\"\"\"\n <|body_0|>\n\n def accueil(self):\n \"\"\"Message d'accueil du contexte\"\"\"\n <|body_1|>\n\n def opt_descendre_membre(self, arguments):\n \"\"\"Descend un membre dans la liste. Syntaxe : /b \"\"\"\n <|body_2|>\n\n def opt_suppr_membre(self, arguments):\n \"\"\"Supprime un membre Syntaxe : /d \"\"\"\n <|body_3|>\n\n def opt_remonter_membre(self, arguments):\n \"\"\"Remonte un membre dans la liste. Syntaxe : /h \"\"\"\n <|body_4|>\n\n def interpreter(self, msg):\n \"\"\"Interprétation de la présentation\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Editeur.__init__(self, pere, objet, attribut)\n self.ajouter_option('b', self.opt_descendre_membre)\n self.ajouter_option('d', self.opt_suppr_membre)\n self.ajouter_option('h', self.opt_remonter_membre)\n<|end_body_0|>\n\n<|body_start_1|>\n squelette = self.objet\n msg = '| |tit|' + 'Edition des membres de {}'.format(squelette.cle).ljust(76)\n msg += '|ff||\\n' + self.opts.separateur + '\\n'\n msg += '\\nOptions supportées :'\n msg += '\\n |cmd|/b |ff| pour descendre le membre'\n msg += '\\n |cmd|/d |ff| pour supprimer le membre'\n msg += '\\n |cmd|/h |ff| pour remonter le membre\\n'\n msg += self.aide_courte\n msg += 'Membres courants :\\n'\n membres = squelette.membres\n liste_membres = ''\n for membre in membres:\n ligne = '\\n |ent|' + membre.nom.ljust(10) + '|ff|'\n liste_membres += ligne\n if not liste_membres:\n liste_membres += \"\\n |att|Aucun membre pour l'instant.|ff|\"\n msg += liste_membres\n return msg\n<|end_body_1|>\n\n<|body_start_2|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.descendre_membre(membre.nom)\n self.actualiser()\n<|end_body_2|>\n\n<|body_start_3|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.supprimer_membre(membre.nom)\n self.actualiser()\n<|end_body_3|>\n\n<|body_start_4|>\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.remonter_membre(membre.nom)\n self.actualiser()\n<|end_body_4|>\n\n<|body_start_5|>\n squelette = self.objet\n membres = squelette.membres\n nom = supprimer_accents(msg).lower()\n try:\n membre = squelette.get_membre(nom)\n except KeyError:\n try:\n membre = squelette.ajouter_membre(msg)\n except ValueError:\n self.pere << '|err|Ce nom de membre est invalide.|ff|'\n return\n enveloppe = EnveloppeObjet(EdtMembre, membre, None)\n enveloppe.parent = self\n enveloppe.aide_courte = \"Entrez |ent|/|ff| pour revenir à la fenêtre parente.\\nOptions :\\n - |ent|/n |ff| : modifie le nom du membre\\n - |ent|/g |ff| : permet de regrouper plusieurs membres\\n - |ent|/p |ff| : change la probabilité de toucher le membre en combat\\n - |ent|/f |ff| : change l'état d'un flag\\n\"\n contexte = enveloppe.construire(self.pere)\n self.migrer_contexte(contexte)\n<|end_body_5|>\n", "revision_id": "7e93bff08cdf891352efba587e89c40f3b4a2301", "skeleton": "<|skeleton|>\nclass EdtMembres:\n \"\"\"Contexte-éditeur d'édition des membres.\"\"\"\n\n def __init__(self, pere, objet=None, attribut=None):\n \"\"\"Constructeur de l'éditeur\"\"\"\n <|body_0|>\n\n def accueil(self):\n \"\"\"Message d'accueil du contexte\"\"\"\n <|body_1|>\n\n def opt_descendre_membre(self, arguments):\n \"\"\"Descend un membre dans la liste. Syntaxe : /b \"\"\"\n <|body_2|>\n\n def opt_suppr_membre(self, arguments):\n \"\"\"Supprime un membre Syntaxe : /d \"\"\"\n <|body_3|>\n\n def opt_remonter_membre(self, arguments):\n \"\"\"Remonte un membre dans la liste. Syntaxe : /h \"\"\"\n <|body_4|>\n\n def interpreter(self, msg):\n \"\"\"Interprétation de la présentation\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EdtMembres:\n \"\"\"Contexte-éditeur d'édition des membres.\"\"\"\n\n def __init__(self, pere, objet=None, attribut=None):\n \"\"\"Constructeur de l'éditeur\"\"\"\n Editeur.__init__(self, pere, objet, attribut)\n self.ajouter_option('b', self.opt_descendre_membre)\n self.ajouter_option('d', self.opt_suppr_membre)\n self.ajouter_option('h', self.opt_remonter_membre)\n\n def accueil(self):\n \"\"\"Message d'accueil du contexte\"\"\"\n squelette = self.objet\n msg = '| |tit|' + 'Edition des membres de {}'.format(squelette.cle).ljust(76)\n msg += '|ff||\\n' + self.opts.separateur + '\\n'\n msg += '\\nOptions supportées :'\n msg += '\\n |cmd|/b |ff| pour descendre le membre'\n msg += '\\n |cmd|/d |ff| pour supprimer le membre'\n msg += '\\n |cmd|/h |ff| pour remonter le membre\\n'\n msg += self.aide_courte\n msg += 'Membres courants :\\n'\n membres = squelette.membres\n liste_membres = ''\n for membre in membres:\n ligne = '\\n |ent|' + membre.nom.ljust(10) + '|ff|'\n liste_membres += ligne\n if not liste_membres:\n liste_membres += \"\\n |att|Aucun membre pour l'instant.|ff|\"\n msg += liste_membres\n return msg\n\n def opt_descendre_membre(self, arguments):\n \"\"\"Descend un membre dans la liste. Syntaxe : /b \"\"\"\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.descendre_membre(membre.nom)\n self.actualiser()\n\n def opt_suppr_membre(self, arguments):\n \"\"\"Supprime un membre Syntaxe : /d \"\"\"\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.supprimer_membre(membre.nom)\n self.actualiser()\n\n def opt_remonter_membre(self, arguments):\n \"\"\"Remonte un membre dans la liste. Syntaxe : /h \"\"\"\n squelette = self.objet\n try:\n membre = squelette.get_membre(arguments)\n except KeyError:\n self.pere << '|err|Ce membre est introuvable.|ff|'\n else:\n squelette.remonter_membre(membre.nom)\n self.actualiser()\n\n def interpreter(self, msg):\n \"\"\"Interprétation de la présentation\"\"\"\n squelette = self.objet\n membres = squelette.membres\n nom = supprimer_accents(msg).lower()\n try:\n membre = squelette.get_membre(nom)\n except KeyError:\n try:\n membre = squelette.ajouter_membre(msg)\n except ValueError:\n self.pere << '|err|Ce nom de membre est invalide.|ff|'\n return\n enveloppe = EnveloppeObjet(EdtMembre, membre, None)\n enveloppe.parent = self\n enveloppe.aide_courte = \"Entrez |ent|/|ff| pour revenir à la fenêtre parente.\\nOptions :\\n - |ent|/n |ff| : modifie le nom du membre\\n - |ent|/g |ff| : permet de regrouper plusieurs membres\\n - |ent|/p |ff| : change la probabilité de toucher le membre en combat\\n - |ent|/f |ff| : change l'état d'un flag\\n\"\n contexte = enveloppe.construire(self.pere)\n self.migrer_contexte(contexte)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/primaires/perso/editeurs/skedit/edt_membres.py", "source_repo": "vincent-lg/tsunami", "split": "test", "star_events_count": 5} {"blob_id": "b221461b3857f1b937beb94ac34cca001837c9fb", "bodies": ["normalizer = Nondimensional()\nnormalizer._configure()\ntstep = TimeStepUniform()\ntstep._configure()\ntstep.preinitialize()\ntstep.verifyConfiguration()\ntstep.initialize(normalizer)\nself.tstep = tstep\nreturn", "tstep = self.tstep\nself.assertEqual(1, tstep.numTimeSteps())\ntstep.totalTimeN = 4.0\ntstep.dtN = 2.0\nself.assertEqual(3, tstep.numTimeSteps())\nreturn", "tstep = self.tstep\nintegrators = [Integrator(4.0), Integrator(8.0)]\nfrom pylith.topology.Mesh import Mesh\nmesh = Mesh()\nself.assertEqual(1.0, tstep.timeStep(mesh, integrators))\ntstep.dtN = 0.5\nself.assertEqual(0.5, tstep.timeStep(mesh, integrators))\ncaught = False\ntry:\n tstep.dtN = 10.0\n tstep.timeStep(mesh, integrators)\nexcept RuntimeError:\n caught = True\nself.failUnless(caught)\nreturn", "tstep = self.tstep\nself.assertEqual(1.0, tstep.currentStep())\ntstep.dtN = 0.0001\nself.assertEqual(0.0001, tstep.currentStep())\nreturn", "from pylith.problems.TimeStepUniform import time_step\nts = time_step()\nreturn"], "bodies_text": "<|body_start_0|>\n normalizer = Nondimensional()\n normalizer._configure()\n tstep = TimeStepUniform()\n tstep._configure()\n tstep.preinitialize()\n tstep.verifyConfiguration()\n tstep.initialize(normalizer)\n self.tstep = tstep\n return\n<|end_body_0|>\n\n<|body_start_1|>\n tstep = self.tstep\n self.assertEqual(1, tstep.numTimeSteps())\n tstep.totalTimeN = 4.0\n tstep.dtN = 2.0\n self.assertEqual(3, tstep.numTimeSteps())\n return\n<|end_body_1|>\n\n<|body_start_2|>\n tstep = self.tstep\n integrators = [Integrator(4.0), Integrator(8.0)]\n from pylith.topology.Mesh import Mesh\n mesh = Mesh()\n self.assertEqual(1.0, tstep.timeStep(mesh, integrators))\n tstep.dtN = 0.5\n self.assertEqual(0.5, tstep.timeStep(mesh, integrators))\n caught = False\n try:\n tstep.dtN = 10.0\n tstep.timeStep(mesh, integrators)\n except RuntimeError:\n caught = True\n self.failUnless(caught)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n tstep = self.tstep\n self.assertEqual(1.0, tstep.currentStep())\n tstep.dtN = 0.0001\n self.assertEqual(0.0001, tstep.currentStep())\n return\n<|end_body_3|>\n\n<|body_start_4|>\n from pylith.problems.TimeStepUniform import time_step\n ts = time_step()\n return\n<|end_body_4|>\n", "class_docstring": "Unit testing of TimeStepUniform object.", "class_name": "TestTimeStepUniform", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestTimeStepUniform:\n \"\"\"Unit testing of TimeStepUniform object.\"\"\"\n\n def setUp(self):\n \"\"\"Setup time step object.\"\"\"\n <|body_0|>\n\n def test_numTimeSteps(self):\n \"\"\"Test numTimeSteps().\"\"\"\n <|body_1|>\n\n def test_timeStep(self):\n \"\"\"Test timeStep().\"\"\"\n <|body_2|>\n\n def test_currentStep(self):\n \"\"\"Test currentStep().\"\"\"\n <|body_3|>\n\n def test_factory(self):\n \"\"\"Test factory method.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n normalizer = Nondimensional()\n normalizer._configure()\n tstep = TimeStepUniform()\n tstep._configure()\n tstep.preinitialize()\n tstep.verifyConfiguration()\n tstep.initialize(normalizer)\n self.tstep = tstep\n return\n<|end_body_0|>\n\n<|body_start_1|>\n tstep = self.tstep\n self.assertEqual(1, tstep.numTimeSteps())\n tstep.totalTimeN = 4.0\n tstep.dtN = 2.0\n self.assertEqual(3, tstep.numTimeSteps())\n return\n<|end_body_1|>\n\n<|body_start_2|>\n tstep = self.tstep\n integrators = [Integrator(4.0), Integrator(8.0)]\n from pylith.topology.Mesh import Mesh\n mesh = Mesh()\n self.assertEqual(1.0, tstep.timeStep(mesh, integrators))\n tstep.dtN = 0.5\n self.assertEqual(0.5, tstep.timeStep(mesh, integrators))\n caught = False\n try:\n tstep.dtN = 10.0\n tstep.timeStep(mesh, integrators)\n except RuntimeError:\n caught = True\n self.failUnless(caught)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n tstep = self.tstep\n self.assertEqual(1.0, tstep.currentStep())\n tstep.dtN = 0.0001\n self.assertEqual(0.0001, tstep.currentStep())\n return\n<|end_body_3|>\n\n<|body_start_4|>\n from pylith.problems.TimeStepUniform import time_step\n ts = time_step()\n return\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000036", "length_bytes": 2679, "license_type": "permissive", "methods": [{"docstring": "Setup time step object.", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Test numTimeSteps().", "name": "test_numTimeSteps", "signature": "def test_numTimeSteps(self)"}, {"docstring": "Test timeStep().", "name": "test_timeStep", "signature": "def test_timeStep(self)"}, {"docstring": "Test currentStep().", "name": "test_currentStep", "signature": "def test_currentStep(self)"}, {"docstring": "Test factory method.", "name": "test_factory", "signature": "def test_factory(self)"}], "n_methods": 5, "prompt": "Implement the Python class `TestTimeStepUniform` described below.\n\nClass description:\nUnit testing of TimeStepUniform object.\n\nMethod signatures and docstrings:\n- def setUp(self): Setup time step object.\n- def test_numTimeSteps(self): Test numTimeSteps().\n- def test_timeStep(self): Test timeStep().\n- def test_currentStep(self): Test currentStep().\n- def test_factory(self): Test factory method.", "prompted_full_text": "Implement the Python class `TestTimeStepUniform` described below.\n\nClass description:\nUnit testing of TimeStepUniform object.\n\nMethod signatures and docstrings:\n- def setUp(self): Setup time step object.\n- def test_numTimeSteps(self): Test numTimeSteps().\n- def test_timeStep(self): Test timeStep().\n- def test_currentStep(self): Test currentStep().\n- def test_factory(self): Test factory method.\n\n<|skeleton|>\nclass TestTimeStepUniform:\n \"\"\"Unit testing of TimeStepUniform object.\"\"\"\n\n def setUp(self):\n \"\"\"Setup time step object.\"\"\"\n <|body_0|>\n\n def test_numTimeSteps(self):\n \"\"\"Test numTimeSteps().\"\"\"\n <|body_1|>\n\n def test_timeStep(self):\n \"\"\"Test timeStep().\"\"\"\n <|body_2|>\n\n def test_currentStep(self):\n \"\"\"Test currentStep().\"\"\"\n <|body_3|>\n\n def test_factory(self):\n \"\"\"Test factory method.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n normalizer = Nondimensional()\n normalizer._configure()\n tstep = TimeStepUniform()\n tstep._configure()\n tstep.preinitialize()\n tstep.verifyConfiguration()\n tstep.initialize(normalizer)\n self.tstep = tstep\n return\n<|end_body_0|>\n\n<|body_start_1|>\n tstep = self.tstep\n self.assertEqual(1, tstep.numTimeSteps())\n tstep.totalTimeN = 4.0\n tstep.dtN = 2.0\n self.assertEqual(3, tstep.numTimeSteps())\n return\n<|end_body_1|>\n\n<|body_start_2|>\n tstep = self.tstep\n integrators = [Integrator(4.0), Integrator(8.0)]\n from pylith.topology.Mesh import Mesh\n mesh = Mesh()\n self.assertEqual(1.0, tstep.timeStep(mesh, integrators))\n tstep.dtN = 0.5\n self.assertEqual(0.5, tstep.timeStep(mesh, integrators))\n caught = False\n try:\n tstep.dtN = 10.0\n tstep.timeStep(mesh, integrators)\n except RuntimeError:\n caught = True\n self.failUnless(caught)\n return\n<|end_body_2|>\n\n<|body_start_3|>\n tstep = self.tstep\n self.assertEqual(1.0, tstep.currentStep())\n tstep.dtN = 0.0001\n self.assertEqual(0.0001, tstep.currentStep())\n return\n<|end_body_3|>\n\n<|body_start_4|>\n from pylith.problems.TimeStepUniform import time_step\n ts = time_step()\n return\n<|end_body_4|>\n", "revision_id": "8d0170324d3fcdc5e6c4281759c680faa5dd8d38", "skeleton": "<|skeleton|>\nclass TestTimeStepUniform:\n \"\"\"Unit testing of TimeStepUniform object.\"\"\"\n\n def setUp(self):\n \"\"\"Setup time step object.\"\"\"\n <|body_0|>\n\n def test_numTimeSteps(self):\n \"\"\"Test numTimeSteps().\"\"\"\n <|body_1|>\n\n def test_timeStep(self):\n \"\"\"Test timeStep().\"\"\"\n <|body_2|>\n\n def test_currentStep(self):\n \"\"\"Test currentStep().\"\"\"\n <|body_3|>\n\n def test_factory(self):\n \"\"\"Test factory method.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestTimeStepUniform:\n \"\"\"Unit testing of TimeStepUniform object.\"\"\"\n\n def setUp(self):\n \"\"\"Setup time step object.\"\"\"\n normalizer = Nondimensional()\n normalizer._configure()\n tstep = TimeStepUniform()\n tstep._configure()\n tstep.preinitialize()\n tstep.verifyConfiguration()\n tstep.initialize(normalizer)\n self.tstep = tstep\n return\n\n def test_numTimeSteps(self):\n \"\"\"Test numTimeSteps().\"\"\"\n tstep = self.tstep\n self.assertEqual(1, tstep.numTimeSteps())\n tstep.totalTimeN = 4.0\n tstep.dtN = 2.0\n self.assertEqual(3, tstep.numTimeSteps())\n return\n\n def test_timeStep(self):\n \"\"\"Test timeStep().\"\"\"\n tstep = self.tstep\n integrators = [Integrator(4.0), Integrator(8.0)]\n from pylith.topology.Mesh import Mesh\n mesh = Mesh()\n self.assertEqual(1.0, tstep.timeStep(mesh, integrators))\n tstep.dtN = 0.5\n self.assertEqual(0.5, tstep.timeStep(mesh, integrators))\n caught = False\n try:\n tstep.dtN = 10.0\n tstep.timeStep(mesh, integrators)\n except RuntimeError:\n caught = True\n self.failUnless(caught)\n return\n\n def test_currentStep(self):\n \"\"\"Test currentStep().\"\"\"\n tstep = self.tstep\n self.assertEqual(1.0, tstep.currentStep())\n tstep.dtN = 0.0001\n self.assertEqual(0.0001, tstep.currentStep())\n return\n\n def test_factory(self):\n \"\"\"Test factory method.\"\"\"\n from pylith.problems.TimeStepUniform import time_step\n ts = time_step()\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "unittests/pytests/problems/TestTimeStepUniform.py", "source_repo": "rwalkerlewis/pylith", "split": "test", "star_events_count": 0} {"blob_id": "e2063b4154f217d2ff5041f56af8865f22ccaa65", "bodies": ["challenges: List[Dict[str, Any]] = []\nchallenges = TurboChallenges.Table(self, challenges)\nUtility.WriteFile(self, f'{self.eXAssets}/turboChallenges.json', challenges)\nlog.info(f'Compiled {len(challenges):,} Tomogunchi Turbo Challenges')", "table: List[Dict[str, Any]] = Utility.ReadCSV(self, f'{self.iXAssets}/mp/petwatchturbotable.csv', PetWatchTurboTable)\nif table is None:\n return challenges\nfor entry in table:\n challenges.append({'altId': entry.get('ref'), 'phase': entry.get('phaseNum'), 'description': self.localize.get(entry.get('challengeDesc')), 'phaseTime': entry.get('phaseTime'), 'maxBonusTime': entry.get('bonusTimeMax'), 'charmAltId': None if (cid := entry.get('charmID')) is None else f'cos_{cid}'})\nreturn challenges"], "bodies_text": "<|body_start_0|>\n challenges: List[Dict[str, Any]] = []\n challenges = TurboChallenges.Table(self, challenges)\n Utility.WriteFile(self, f'{self.eXAssets}/turboChallenges.json', challenges)\n log.info(f'Compiled {len(challenges):,} Tomogunchi Turbo Challenges')\n<|end_body_0|>\n\n<|body_start_1|>\n table: List[Dict[str, Any]] = Utility.ReadCSV(self, f'{self.iXAssets}/mp/petwatchturbotable.csv', PetWatchTurboTable)\n if table is None:\n return challenges\n for entry in table:\n challenges.append({'altId': entry.get('ref'), 'phase': entry.get('phaseNum'), 'description': self.localize.get(entry.get('challengeDesc')), 'phaseTime': entry.get('phaseTime'), 'maxBonusTime': entry.get('bonusTimeMax'), 'charmAltId': None if (cid := entry.get('charmID')) is None else f'cos_{cid}'})\n return challenges\n<|end_body_1|>\n", "class_docstring": "Tomogunchi Turbo Challenges XAssets.", "class_name": "TurboChallenges", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TurboChallenges:\n \"\"\"Tomogunchi Turbo Challenges XAssets.\"\"\"\n\n def Compile(self: Any) -> None:\n \"\"\"Compile the Tomogunchi Turbo Challenges XAssets.\"\"\"\n <|body_0|>\n\n def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Compile the mp/petwatchturbotable.csv XAsset.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n challenges: List[Dict[str, Any]] = []\n challenges = TurboChallenges.Table(self, challenges)\n Utility.WriteFile(self, f'{self.eXAssets}/turboChallenges.json', challenges)\n log.info(f'Compiled {len(challenges):,} Tomogunchi Turbo Challenges')\n<|end_body_0|>\n\n<|body_start_1|>\n table: List[Dict[str, Any]] = Utility.ReadCSV(self, f'{self.iXAssets}/mp/petwatchturbotable.csv', PetWatchTurboTable)\n if table is None:\n return challenges\n for entry in table:\n challenges.append({'altId': entry.get('ref'), 'phase': entry.get('phaseNum'), 'description': self.localize.get(entry.get('challengeDesc')), 'phaseTime': entry.get('phaseTime'), 'maxBonusTime': entry.get('bonusTimeMax'), 'charmAltId': None if (cid := entry.get('charmID')) is None else f'cos_{cid}'})\n return challenges\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000037", "length_bytes": 13794, "license_type": "permissive", "methods": [{"docstring": "Compile the Tomogunchi Turbo Challenges XAssets.", "name": "Compile", "signature": "def Compile(self: Any) -> None"}, {"docstring": "Compile the mp/petwatchturbotable.csv XAsset.", "name": "Table", "signature": "def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028010", "prompt": "Implement the Python class `TurboChallenges` described below.\n\nClass description:\nTomogunchi Turbo Challenges XAssets.\n\nMethod signatures and docstrings:\n- def Compile(self: Any) -> None: Compile the Tomogunchi Turbo Challenges XAssets.\n- def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]: Compile the mp/petwatchturbotable.csv XAsset.", "prompted_full_text": "Implement the Python class `TurboChallenges` described below.\n\nClass description:\nTomogunchi Turbo Challenges XAssets.\n\nMethod signatures and docstrings:\n- def Compile(self: Any) -> None: Compile the Tomogunchi Turbo Challenges XAssets.\n- def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]: Compile the mp/petwatchturbotable.csv XAsset.\n\n<|skeleton|>\nclass TurboChallenges:\n \"\"\"Tomogunchi Turbo Challenges XAssets.\"\"\"\n\n def Compile(self: Any) -> None:\n \"\"\"Compile the Tomogunchi Turbo Challenges XAssets.\"\"\"\n <|body_0|>\n\n def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Compile the mp/petwatchturbotable.csv XAsset.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n challenges: List[Dict[str, Any]] = []\n challenges = TurboChallenges.Table(self, challenges)\n Utility.WriteFile(self, f'{self.eXAssets}/turboChallenges.json', challenges)\n log.info(f'Compiled {len(challenges):,} Tomogunchi Turbo Challenges')\n<|end_body_0|>\n\n<|body_start_1|>\n table: List[Dict[str, Any]] = Utility.ReadCSV(self, f'{self.iXAssets}/mp/petwatchturbotable.csv', PetWatchTurboTable)\n if table is None:\n return challenges\n for entry in table:\n challenges.append({'altId': entry.get('ref'), 'phase': entry.get('phaseNum'), 'description': self.localize.get(entry.get('challengeDesc')), 'phaseTime': entry.get('phaseTime'), 'maxBonusTime': entry.get('bonusTimeMax'), 'charmAltId': None if (cid := entry.get('charmID')) is None else f'cos_{cid}'})\n return challenges\n<|end_body_1|>\n", "revision_id": "82d3198a64eb2905e96dd536ce2f0acb52f9ce77", "skeleton": "<|skeleton|>\nclass TurboChallenges:\n \"\"\"Tomogunchi Turbo Challenges XAssets.\"\"\"\n\n def Compile(self: Any) -> None:\n \"\"\"Compile the Tomogunchi Turbo Challenges XAssets.\"\"\"\n <|body_0|>\n\n def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Compile the mp/petwatchturbotable.csv XAsset.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TurboChallenges:\n \"\"\"Tomogunchi Turbo Challenges XAssets.\"\"\"\n\n def Compile(self: Any) -> None:\n \"\"\"Compile the Tomogunchi Turbo Challenges XAssets.\"\"\"\n challenges: List[Dict[str, Any]] = []\n challenges = TurboChallenges.Table(self, challenges)\n Utility.WriteFile(self, f'{self.eXAssets}/turboChallenges.json', challenges)\n log.info(f'Compiled {len(challenges):,} Tomogunchi Turbo Challenges')\n\n def Table(self: Any, challenges: List[Dict[str, Any]]) -> List[Dict[str, Any]]:\n \"\"\"Compile the mp/petwatchturbotable.csv XAsset.\"\"\"\n table: List[Dict[str, Any]] = Utility.ReadCSV(self, f'{self.iXAssets}/mp/petwatchturbotable.csv', PetWatchTurboTable)\n if table is None:\n return challenges\n for entry in table:\n challenges.append({'altId': entry.get('ref'), 'phase': entry.get('phaseNum'), 'description': self.localize.get(entry.get('challengeDesc')), 'phaseTime': entry.get('phaseTime'), 'maxBonusTime': entry.get('bonusTimeMax'), 'charmAltId': None if (cid := entry.get('charmID')) is None else f'cos_{cid}'})\n return challenges\n", "source": "the_stack_v2_python_sparse", "source_path": "ModernWarfare/XAssets/challenges.py", "source_repo": "dbuentello/Hyde", "split": "test", "star_events_count": 0} {"blob_id": "cd3276ed69d81b835c55546b465cf5879510f07a", "bodies": ["self.name = str(theName)\nself.type = theType\nself.HP = theHP\nself.moves = theMoves\nself.shiny = isShiny", "print('\\nName: {self.name}'.format(self=self))\nprint('Type: {self.type}'.format(self=self))\nprint('Maximum HP: {self.HP}'.format(self=self))\nprint('Moves:')\nfor i in self.moves:\n print(i)\nif self.shiny == 0:\n return 'Shiny Status: Not Shiny'\nelse:\n return 'Shiny Status: Shiny!'", "card = [self.name, self.type, self.HP, self.shiny]\nfor i in self.moves:\n for x in i:\n card.append(x)\nreturn card"], "bodies_text": "<|body_start_0|>\n self.name = str(theName)\n self.type = theType\n self.HP = theHP\n self.moves = theMoves\n self.shiny = isShiny\n<|end_body_0|>\n\n<|body_start_1|>\n print('\\nName: {self.name}'.format(self=self))\n print('Type: {self.type}'.format(self=self))\n print('Maximum HP: {self.HP}'.format(self=self))\n print('Moves:')\n for i in self.moves:\n print(i)\n if self.shiny == 0:\n return 'Shiny Status: Not Shiny'\n else:\n return 'Shiny Status: Shiny!'\n<|end_body_1|>\n\n<|body_start_2|>\n card = [self.name, self.type, self.HP, self.shiny]\n for i in self.moves:\n for x in i:\n card.append(x)\n return card\n<|end_body_2|>\n", "class_docstring": "Card class is used by the Deck class when accessing individual cards. Does not need to be called upon outside of the Deck class.", "class_name": "Card", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Card:\n \"\"\"Card class is used by the Deck class when accessing individual cards. Does not need to be called upon outside of the Deck class.\"\"\"\n\n def __init__(self, theName, theType, theHP, theMoves, isShiny):\n \"\"\"Initaliser for indiviual cards. Doesn't need to be called by user. Should be used by the Class:Deck functions. A card needs a Name, a Type, HP, List of Moves, and whether or not it is Shiny.\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"___str___ function is used when a indiviual card is called to be printed. Should be used by the Class:Deck functions.\"\"\"\n <|body_1|>\n\n def save(self):\n \"\"\"Save function is called by the Save To File function in the Deck Class. It returns the card varibles as a list, so that it can be appended to a new save file. The for loop here removes the moves from a list format and instead appends the individual varibles to the card list.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = str(theName)\n self.type = theType\n self.HP = theHP\n self.moves = theMoves\n self.shiny = isShiny\n<|end_body_0|>\n\n<|body_start_1|>\n print('\\nName: {self.name}'.format(self=self))\n print('Type: {self.type}'.format(self=self))\n print('Maximum HP: {self.HP}'.format(self=self))\n print('Moves:')\n for i in self.moves:\n print(i)\n if self.shiny == 0:\n return 'Shiny Status: Not Shiny'\n else:\n return 'Shiny Status: Shiny!'\n<|end_body_1|>\n\n<|body_start_2|>\n card = [self.name, self.type, self.HP, self.shiny]\n for i in self.moves:\n for x in i:\n card.append(x)\n return card\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000038", "length_bytes": 17713, "license_type": "no_license", "methods": [{"docstring": "Initaliser for indiviual cards. Doesn't need to be called by user. Should be used by the Class:Deck functions. A card needs a Name, a Type, HP, List of Moves, and whether or not it is Shiny.", "name": "__init__", "signature": "def __init__(self, theName, theType, theHP, theMoves, isShiny)"}, {"docstring": "___str___ function is used when a indiviual card is called to be printed. Should be used by the Class:Deck functions.", "name": "__str__", "signature": "def __str__(self)"}, {"docstring": "Save function is called by the Save To File function in the Deck Class. It returns the card varibles as a list, so that it can be appended to a new save file. The for loop here removes the moves from a list format and instead appends the individual varibles to the card list.", "name": "save", "signature": "def save(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000148", "prompt": "Implement the Python class `Card` described below.\n\nClass description:\nCard class is used by the Deck class when accessing individual cards. Does not need to be called upon outside of the Deck class.\n\nMethod signatures and docstrings:\n- def __init__(self, theName, theType, theHP, theMoves, isShiny): Initaliser for indiviual cards. Doesn't need to be called by user. Should be used by the Class:Deck functions. A card needs a Name, a Type, HP, List of Moves, and whether or not it is Shiny.\n- def __str__(self): ___str___ function is used when a indiviual card is called to be printed. Should be used by the Class:Deck functions.\n- def save(self): Save function is called by the Save To File function in the Deck Class. It returns the card varibles as a list, so that it can be appended to a new save file. The for loop here removes the moves from a list format and instead appends the individual varibles to the card list.", "prompted_full_text": "Implement the Python class `Card` described below.\n\nClass description:\nCard class is used by the Deck class when accessing individual cards. Does not need to be called upon outside of the Deck class.\n\nMethod signatures and docstrings:\n- def __init__(self, theName, theType, theHP, theMoves, isShiny): Initaliser for indiviual cards. Doesn't need to be called by user. Should be used by the Class:Deck functions. A card needs a Name, a Type, HP, List of Moves, and whether or not it is Shiny.\n- def __str__(self): ___str___ function is used when a indiviual card is called to be printed. Should be used by the Class:Deck functions.\n- def save(self): Save function is called by the Save To File function in the Deck Class. It returns the card varibles as a list, so that it can be appended to a new save file. The for loop here removes the moves from a list format and instead appends the individual varibles to the card list.\n\n<|skeleton|>\nclass Card:\n \"\"\"Card class is used by the Deck class when accessing individual cards. Does not need to be called upon outside of the Deck class.\"\"\"\n\n def __init__(self, theName, theType, theHP, theMoves, isShiny):\n \"\"\"Initaliser for indiviual cards. Doesn't need to be called by user. Should be used by the Class:Deck functions. A card needs a Name, a Type, HP, List of Moves, and whether or not it is Shiny.\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"___str___ function is used when a indiviual card is called to be printed. Should be used by the Class:Deck functions.\"\"\"\n <|body_1|>\n\n def save(self):\n \"\"\"Save function is called by the Save To File function in the Deck Class. It returns the card varibles as a list, so that it can be appended to a new save file. The for loop here removes the moves from a list format and instead appends the individual varibles to the card list.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.name = str(theName)\n self.type = theType\n self.HP = theHP\n self.moves = theMoves\n self.shiny = isShiny\n<|end_body_0|>\n\n<|body_start_1|>\n print('\\nName: {self.name}'.format(self=self))\n print('Type: {self.type}'.format(self=self))\n print('Maximum HP: {self.HP}'.format(self=self))\n print('Moves:')\n for i in self.moves:\n print(i)\n if self.shiny == 0:\n return 'Shiny Status: Not Shiny'\n else:\n return 'Shiny Status: Shiny!'\n<|end_body_1|>\n\n<|body_start_2|>\n card = [self.name, self.type, self.HP, self.shiny]\n for i in self.moves:\n for x in i:\n card.append(x)\n return card\n<|end_body_2|>\n", "revision_id": "3c9bf4285b96532d89e75631dda8e507dc807c2f", "skeleton": "<|skeleton|>\nclass Card:\n \"\"\"Card class is used by the Deck class when accessing individual cards. Does not need to be called upon outside of the Deck class.\"\"\"\n\n def __init__(self, theName, theType, theHP, theMoves, isShiny):\n \"\"\"Initaliser for indiviual cards. Doesn't need to be called by user. Should be used by the Class:Deck functions. A card needs a Name, a Type, HP, List of Moves, and whether or not it is Shiny.\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"___str___ function is used when a indiviual card is called to be printed. Should be used by the Class:Deck functions.\"\"\"\n <|body_1|>\n\n def save(self):\n \"\"\"Save function is called by the Save To File function in the Deck Class. It returns the card varibles as a list, so that it can be appended to a new save file. The for loop here removes the moves from a list format and instead appends the individual varibles to the card list.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Card:\n \"\"\"Card class is used by the Deck class when accessing individual cards. Does not need to be called upon outside of the Deck class.\"\"\"\n\n def __init__(self, theName, theType, theHP, theMoves, isShiny):\n \"\"\"Initaliser for indiviual cards. Doesn't need to be called by user. Should be used by the Class:Deck functions. A card needs a Name, a Type, HP, List of Moves, and whether or not it is Shiny.\"\"\"\n self.name = str(theName)\n self.type = theType\n self.HP = theHP\n self.moves = theMoves\n self.shiny = isShiny\n\n def __str__(self):\n \"\"\"___str___ function is used when a indiviual card is called to be printed. Should be used by the Class:Deck functions.\"\"\"\n print('\\nName: {self.name}'.format(self=self))\n print('Type: {self.type}'.format(self=self))\n print('Maximum HP: {self.HP}'.format(self=self))\n print('Moves:')\n for i in self.moves:\n print(i)\n if self.shiny == 0:\n return 'Shiny Status: Not Shiny'\n else:\n return 'Shiny Status: Shiny!'\n\n def save(self):\n \"\"\"Save function is called by the Save To File function in the Deck Class. It returns the card varibles as a list, so that it can be appended to a new save file. The for loop here removes the moves from a list format and instead appends the individual varibles to the card list.\"\"\"\n card = [self.name, self.type, self.HP, self.shiny]\n for i in self.moves:\n for x in i:\n card.append(x)\n return card\n", "source": "the_stack_v2_python_sparse", "source_path": "TradingCards.py", "source_repo": "Kit-Bower-Morris/COMP517_ProgrammingFundamentals", "split": "test", "star_events_count": 0} {"blob_id": "61a77abcc6a8dc7c218f7df2fb2cf5cdd0dc6ede", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn AccessPackageAssignmentRequestCallbackData()", "from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\nfrom .custom_extension_data import CustomExtensionData\nfrom .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\nfrom .custom_extension_data import CustomExtensionData\nfields: Dict[str, Callable[[Any], None]] = {'customExtensionStageInstanceDetail': lambda n: setattr(self, 'custom_extension_stage_instance_detail', n.get_str_value()), 'customExtensionStageInstanceId': lambda n: setattr(self, 'custom_extension_stage_instance_id', n.get_str_value()), 'stage': lambda n: setattr(self, 'stage', n.get_enum_value(AccessPackageCustomExtensionStage)), 'state': lambda n: setattr(self, 'state', n.get_str_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_str_value('customExtensionStageInstanceDetail', self.custom_extension_stage_instance_detail)\nwriter.write_str_value('customExtensionStageInstanceId', self.custom_extension_stage_instance_id)\nwriter.write_enum_value('stage', self.stage)\nwriter.write_str_value('state', self.state)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequestCallbackData()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n fields: Dict[str, Callable[[Any], None]] = {'customExtensionStageInstanceDetail': lambda n: setattr(self, 'custom_extension_stage_instance_detail', n.get_str_value()), 'customExtensionStageInstanceId': lambda n: setattr(self, 'custom_extension_stage_instance_id', n.get_str_value()), 'stage': lambda n: setattr(self, 'stage', n.get_enum_value(AccessPackageCustomExtensionStage)), 'state': lambda n: setattr(self, 'state', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('customExtensionStageInstanceDetail', self.custom_extension_stage_instance_detail)\n writer.write_str_value('customExtensionStageInstanceId', self.custom_extension_stage_instance_id)\n writer.write_enum_value('stage', self.stage)\n writer.write_str_value('state', self.state)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "AccessPackageAssignmentRequestCallbackData", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccessPackageAssignmentRequestCallbackData:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequestCallbackData:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequestCallbackData\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequestCallbackData()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n fields: Dict[str, Callable[[Any], None]] = {'customExtensionStageInstanceDetail': lambda n: setattr(self, 'custom_extension_stage_instance_detail', n.get_str_value()), 'customExtensionStageInstanceId': lambda n: setattr(self, 'custom_extension_stage_instance_id', n.get_str_value()), 'stage': lambda n: setattr(self, 'stage', n.get_enum_value(AccessPackageCustomExtensionStage)), 'state': lambda n: setattr(self, 'state', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('customExtensionStageInstanceDetail', self.custom_extension_stage_instance_detail)\n writer.write_str_value('customExtensionStageInstanceId', self.custom_extension_stage_instance_id)\n writer.write_enum_value('stage', self.stage)\n writer.write_str_value('state', self.state)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000039", "length_bytes": 3917, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequestCallbackData", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequestCallbackData"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000099", "prompt": "Implement the Python class `AccessPackageAssignmentRequestCallbackData` described below.\n\nClass description:\nImplement the AccessPackageAssignmentRequestCallbackData class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequestCallbackData: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequestCallbackData\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `AccessPackageAssignmentRequestCallbackData` described below.\n\nClass description:\nImplement the AccessPackageAssignmentRequestCallbackData class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequestCallbackData: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequestCallbackData\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass AccessPackageAssignmentRequestCallbackData:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequestCallbackData:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequestCallbackData\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequestCallbackData()\n<|end_body_0|>\n\n<|body_start_1|>\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n fields: Dict[str, Callable[[Any], None]] = {'customExtensionStageInstanceDetail': lambda n: setattr(self, 'custom_extension_stage_instance_detail', n.get_str_value()), 'customExtensionStageInstanceId': lambda n: setattr(self, 'custom_extension_stage_instance_id', n.get_str_value()), 'stage': lambda n: setattr(self, 'stage', n.get_enum_value(AccessPackageCustomExtensionStage)), 'state': lambda n: setattr(self, 'state', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('customExtensionStageInstanceDetail', self.custom_extension_stage_instance_detail)\n writer.write_str_value('customExtensionStageInstanceId', self.custom_extension_stage_instance_id)\n writer.write_enum_value('stage', self.stage)\n writer.write_str_value('state', self.state)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass AccessPackageAssignmentRequestCallbackData:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequestCallbackData:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequestCallbackData\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AccessPackageAssignmentRequestCallbackData:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> AccessPackageAssignmentRequestCallbackData:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: AccessPackageAssignmentRequestCallbackData\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return AccessPackageAssignmentRequestCallbackData()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n from .access_package_custom_extension_stage import AccessPackageCustomExtensionStage\n from .custom_extension_data import CustomExtensionData\n fields: Dict[str, Callable[[Any], None]] = {'customExtensionStageInstanceDetail': lambda n: setattr(self, 'custom_extension_stage_instance_detail', n.get_str_value()), 'customExtensionStageInstanceId': lambda n: setattr(self, 'custom_extension_stage_instance_id', n.get_str_value()), 'stage': lambda n: setattr(self, 'stage', n.get_enum_value(AccessPackageCustomExtensionStage)), 'state': lambda n: setattr(self, 'state', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('customExtensionStageInstanceDetail', self.custom_extension_stage_instance_detail)\n writer.write_str_value('customExtensionStageInstanceId', self.custom_extension_stage_instance_id)\n writer.write_enum_value('stage', self.stage)\n writer.write_str_value('state', self.state)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/access_package_assignment_request_callback_data.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "334cd0831327eb2f8eb4bad0ec1705f57c7f6725", "bodies": ["self.influx = influx\nself.bucket = bucket\nself.range_start = range_start\nself.range_stop = range_stop\nself.query = query\nself.imports = imports\nself.group = group\nself.value = None\nself.full_query = None\nself.query_prefix = f'from(bucket:\"{bucket}\") |> range(start: {range_start}, stop: {range_stop}) |>'\nif imports is not None:\n for i in imports:\n self.query_prefix = f'import \"{i}\" {self.query_prefix}'\nif group is None:\n self.query_postfix = DEFAULT_FUNCTION_FLUX\nelse:\n self.query_postfix = f'|> {group}(column: \"{INFLUX_CONF_VALUE_V2}\")'", "_LOGGER.debug(RENDERING_QUERY_MESSAGE, self.query)\ntry:\n rendered_query = self.query.render(parse_result=False)\nexcept TemplateError as ex:\n _LOGGER.error(RENDERING_QUERY_ERROR_MESSAGE, ex)\n return\nself.full_query = f'{self.query_prefix} {rendered_query} {self.query_postfix}'\n_LOGGER.debug(RUNNING_QUERY_MESSAGE, self.full_query)\ntry:\n tables = self.influx.query(self.full_query)\nexcept (ConnectionError, ValueError) as exc:\n _LOGGER.error(exc)\n self.value = None\n return\nif not tables:\n _LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.full_query)\n self.value = None\nelse:\n if len(tables) > 1 or len(tables[0].records) > 1:\n _LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.full_query)\n self.value = tables[0].records[0].values[INFLUX_CONF_VALUE_V2]"], "bodies_text": "<|body_start_0|>\n self.influx = influx\n self.bucket = bucket\n self.range_start = range_start\n self.range_stop = range_stop\n self.query = query\n self.imports = imports\n self.group = group\n self.value = None\n self.full_query = None\n self.query_prefix = f'from(bucket:\"{bucket}\") |> range(start: {range_start}, stop: {range_stop}) |>'\n if imports is not None:\n for i in imports:\n self.query_prefix = f'import \"{i}\" {self.query_prefix}'\n if group is None:\n self.query_postfix = DEFAULT_FUNCTION_FLUX\n else:\n self.query_postfix = f'|> {group}(column: \"{INFLUX_CONF_VALUE_V2}\")'\n<|end_body_0|>\n\n<|body_start_1|>\n _LOGGER.debug(RENDERING_QUERY_MESSAGE, self.query)\n try:\n rendered_query = self.query.render(parse_result=False)\n except TemplateError as ex:\n _LOGGER.error(RENDERING_QUERY_ERROR_MESSAGE, ex)\n return\n self.full_query = f'{self.query_prefix} {rendered_query} {self.query_postfix}'\n _LOGGER.debug(RUNNING_QUERY_MESSAGE, self.full_query)\n try:\n tables = self.influx.query(self.full_query)\n except (ConnectionError, ValueError) as exc:\n _LOGGER.error(exc)\n self.value = None\n return\n if not tables:\n _LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.full_query)\n self.value = None\n else:\n if len(tables) > 1 or len(tables[0].records) > 1:\n _LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.full_query)\n self.value = tables[0].records[0].values[INFLUX_CONF_VALUE_V2]\n<|end_body_1|>\n", "class_docstring": "Class for handling the data retrieval from Influx with Flux query.", "class_name": "InfluxFluxSensorData", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InfluxFluxSensorData:\n \"\"\"Class for handling the data retrieval from Influx with Flux query.\"\"\"\n\n def __init__(self, influx, bucket, range_start, range_stop, query, imports, group):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data by querying influx.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.influx = influx\n self.bucket = bucket\n self.range_start = range_start\n self.range_stop = range_stop\n self.query = query\n self.imports = imports\n self.group = group\n self.value = None\n self.full_query = None\n self.query_prefix = f'from(bucket:\"{bucket}\") |> range(start: {range_start}, stop: {range_stop}) |>'\n if imports is not None:\n for i in imports:\n self.query_prefix = f'import \"{i}\" {self.query_prefix}'\n if group is None:\n self.query_postfix = DEFAULT_FUNCTION_FLUX\n else:\n self.query_postfix = f'|> {group}(column: \"{INFLUX_CONF_VALUE_V2}\")'\n<|end_body_0|>\n\n<|body_start_1|>\n _LOGGER.debug(RENDERING_QUERY_MESSAGE, self.query)\n try:\n rendered_query = self.query.render(parse_result=False)\n except TemplateError as ex:\n _LOGGER.error(RENDERING_QUERY_ERROR_MESSAGE, ex)\n return\n self.full_query = f'{self.query_prefix} {rendered_query} {self.query_postfix}'\n _LOGGER.debug(RUNNING_QUERY_MESSAGE, self.full_query)\n try:\n tables = self.influx.query(self.full_query)\n except (ConnectionError, ValueError) as exc:\n _LOGGER.error(exc)\n self.value = None\n return\n if not tables:\n _LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.full_query)\n self.value = None\n else:\n if len(tables) > 1 or len(tables[0].records) > 1:\n _LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.full_query)\n self.value = tables[0].records[0].values[INFLUX_CONF_VALUE_V2]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000040", "length_bytes": 11749, "license_type": "permissive", "methods": [{"docstring": "Initialize the data object.", "name": "__init__", "signature": "def __init__(self, influx, bucket, range_start, range_stop, query, imports, group)"}, {"docstring": "Get the latest data by querying influx.", "name": "update", "signature": "def update(self)"}], "n_methods": 2, "prompt": "Implement the Python class `InfluxFluxSensorData` described below.\n\nClass description:\nClass for handling the data retrieval from Influx with Flux query.\n\nMethod signatures and docstrings:\n- def __init__(self, influx, bucket, range_start, range_stop, query, imports, group): Initialize the data object.\n- def update(self): Get the latest data by querying influx.", "prompted_full_text": "Implement the Python class `InfluxFluxSensorData` described below.\n\nClass description:\nClass for handling the data retrieval from Influx with Flux query.\n\nMethod signatures and docstrings:\n- def __init__(self, influx, bucket, range_start, range_stop, query, imports, group): Initialize the data object.\n- def update(self): Get the latest data by querying influx.\n\n<|skeleton|>\nclass InfluxFluxSensorData:\n \"\"\"Class for handling the data retrieval from Influx with Flux query.\"\"\"\n\n def __init__(self, influx, bucket, range_start, range_stop, query, imports, group):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data by querying influx.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.influx = influx\n self.bucket = bucket\n self.range_start = range_start\n self.range_stop = range_stop\n self.query = query\n self.imports = imports\n self.group = group\n self.value = None\n self.full_query = None\n self.query_prefix = f'from(bucket:\"{bucket}\") |> range(start: {range_start}, stop: {range_stop}) |>'\n if imports is not None:\n for i in imports:\n self.query_prefix = f'import \"{i}\" {self.query_prefix}'\n if group is None:\n self.query_postfix = DEFAULT_FUNCTION_FLUX\n else:\n self.query_postfix = f'|> {group}(column: \"{INFLUX_CONF_VALUE_V2}\")'\n<|end_body_0|>\n\n<|body_start_1|>\n _LOGGER.debug(RENDERING_QUERY_MESSAGE, self.query)\n try:\n rendered_query = self.query.render(parse_result=False)\n except TemplateError as ex:\n _LOGGER.error(RENDERING_QUERY_ERROR_MESSAGE, ex)\n return\n self.full_query = f'{self.query_prefix} {rendered_query} {self.query_postfix}'\n _LOGGER.debug(RUNNING_QUERY_MESSAGE, self.full_query)\n try:\n tables = self.influx.query(self.full_query)\n except (ConnectionError, ValueError) as exc:\n _LOGGER.error(exc)\n self.value = None\n return\n if not tables:\n _LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.full_query)\n self.value = None\n else:\n if len(tables) > 1 or len(tables[0].records) > 1:\n _LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.full_query)\n self.value = tables[0].records[0].values[INFLUX_CONF_VALUE_V2]\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass InfluxFluxSensorData:\n \"\"\"Class for handling the data retrieval from Influx with Flux query.\"\"\"\n\n def __init__(self, influx, bucket, range_start, range_stop, query, imports, group):\n \"\"\"Initialize the data object.\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Get the latest data by querying influx.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InfluxFluxSensorData:\n \"\"\"Class for handling the data retrieval from Influx with Flux query.\"\"\"\n\n def __init__(self, influx, bucket, range_start, range_stop, query, imports, group):\n \"\"\"Initialize the data object.\"\"\"\n self.influx = influx\n self.bucket = bucket\n self.range_start = range_start\n self.range_stop = range_stop\n self.query = query\n self.imports = imports\n self.group = group\n self.value = None\n self.full_query = None\n self.query_prefix = f'from(bucket:\"{bucket}\") |> range(start: {range_start}, stop: {range_stop}) |>'\n if imports is not None:\n for i in imports:\n self.query_prefix = f'import \"{i}\" {self.query_prefix}'\n if group is None:\n self.query_postfix = DEFAULT_FUNCTION_FLUX\n else:\n self.query_postfix = f'|> {group}(column: \"{INFLUX_CONF_VALUE_V2}\")'\n\n def update(self):\n \"\"\"Get the latest data by querying influx.\"\"\"\n _LOGGER.debug(RENDERING_QUERY_MESSAGE, self.query)\n try:\n rendered_query = self.query.render(parse_result=False)\n except TemplateError as ex:\n _LOGGER.error(RENDERING_QUERY_ERROR_MESSAGE, ex)\n return\n self.full_query = f'{self.query_prefix} {rendered_query} {self.query_postfix}'\n _LOGGER.debug(RUNNING_QUERY_MESSAGE, self.full_query)\n try:\n tables = self.influx.query(self.full_query)\n except (ConnectionError, ValueError) as exc:\n _LOGGER.error(exc)\n self.value = None\n return\n if not tables:\n _LOGGER.warning(QUERY_NO_RESULTS_MESSAGE, self.full_query)\n self.value = None\n else:\n if len(tables) > 1 or len(tables[0].records) > 1:\n _LOGGER.warning(QUERY_MULTIPLE_RESULTS_MESSAGE, self.full_query)\n self.value = tables[0].records[0].values[INFLUX_CONF_VALUE_V2]\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/influxdb/sensor.py", "source_repo": "home-assistant/core", "split": "test", "star_events_count": 35501} {"blob_id": "c9f808cdb527dbfdd21052bb2ac9c8b5d4e6bb9f", "bodies": ["super(S3Authenticator, self).__init__(*args, **kwargs)\nself.connection = None\nself.bucket = None\nself._conn_kwargs = {}\nif host:\n self._conn_kwargs['host'] = host", "if not boto:\n raise RuntimeError('%s requires boto module which is N/A' % self)\nboto_lgr.setLevel(logging.CRITICAL if lgr.getEffectiveLevel() > 1 else logging.DEBUG)\nconn_kwargs = self._conn_kwargs.copy()\nif bucket_name.lower() != bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\nif credential is not None:\n credentials = credential()\n conn_kind = 'with authentication'\n conn_args = [credentials['key_id'], credentials['secret_id']]\n conn_kwargs['security_token'] = credentials.get('session')\nelse:\n conn_kind = 'anonymously'\n conn_args = []\n conn_kwargs['anon'] = True\nif '.' in bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\nlgr.info('S3 session: Connecting to the bucket %s %s', bucket_name, conn_kind)\nself.connection = conn = boto.connect_s3(*conn_args, **conn_kwargs)\nself.bucket = bucket = get_bucket(conn, bucket_name)\nreturn bucket"], "bodies_text": "<|body_start_0|>\n super(S3Authenticator, self).__init__(*args, **kwargs)\n self.connection = None\n self.bucket = None\n self._conn_kwargs = {}\n if host:\n self._conn_kwargs['host'] = host\n<|end_body_0|>\n\n<|body_start_1|>\n if not boto:\n raise RuntimeError('%s requires boto module which is N/A' % self)\n boto_lgr.setLevel(logging.CRITICAL if lgr.getEffectiveLevel() > 1 else logging.DEBUG)\n conn_kwargs = self._conn_kwargs.copy()\n if bucket_name.lower() != bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n if credential is not None:\n credentials = credential()\n conn_kind = 'with authentication'\n conn_args = [credentials['key_id'], credentials['secret_id']]\n conn_kwargs['security_token'] = credentials.get('session')\n else:\n conn_kind = 'anonymously'\n conn_args = []\n conn_kwargs['anon'] = True\n if '.' in bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n lgr.info('S3 session: Connecting to the bucket %s %s', bucket_name, conn_kind)\n self.connection = conn = boto.connect_s3(*conn_args, **conn_kwargs)\n self.bucket = bucket = get_bucket(conn, bucket_name)\n return bucket\n<|end_body_1|>\n", "class_docstring": "Authenticator for S3 AWS", "class_name": "S3Authenticator", "detected_licenses": ["BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass S3Authenticator:\n \"\"\"Authenticator for S3 AWS\"\"\"\n\n def __init__(self, *args, host=None, **kwargs):\n \"\"\"Parameters ---------- host: str, optional In some cases it is necessary to provide host to connect to. Passed to boto.connect_s3\"\"\"\n <|body_0|>\n\n def authenticate(self, bucket_name, credential, cache=True):\n \"\"\"Authenticates to the specified bucket using provided credentials Returns ------- bucket\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(S3Authenticator, self).__init__(*args, **kwargs)\n self.connection = None\n self.bucket = None\n self._conn_kwargs = {}\n if host:\n self._conn_kwargs['host'] = host\n<|end_body_0|>\n\n<|body_start_1|>\n if not boto:\n raise RuntimeError('%s requires boto module which is N/A' % self)\n boto_lgr.setLevel(logging.CRITICAL if lgr.getEffectiveLevel() > 1 else logging.DEBUG)\n conn_kwargs = self._conn_kwargs.copy()\n if bucket_name.lower() != bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n if credential is not None:\n credentials = credential()\n conn_kind = 'with authentication'\n conn_args = [credentials['key_id'], credentials['secret_id']]\n conn_kwargs['security_token'] = credentials.get('session')\n else:\n conn_kind = 'anonymously'\n conn_args = []\n conn_kwargs['anon'] = True\n if '.' in bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n lgr.info('S3 session: Connecting to the bucket %s %s', bucket_name, conn_kind)\n self.connection = conn = boto.connect_s3(*conn_args, **conn_kwargs)\n self.bucket = bucket = get_bucket(conn, bucket_name)\n return bucket\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000041", "length_bytes": 9980, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- host: str, optional In some cases it is necessary to provide host to connect to. Passed to boto.connect_s3", "name": "__init__", "signature": "def __init__(self, *args, host=None, **kwargs)"}, {"docstring": "Authenticates to the specified bucket using provided credentials Returns ------- bucket", "name": "authenticate", "signature": "def authenticate(self, bucket_name, credential, cache=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_042237", "prompt": "Implement the Python class `S3Authenticator` described below.\n\nClass description:\nAuthenticator for S3 AWS\n\nMethod signatures and docstrings:\n- def __init__(self, *args, host=None, **kwargs): Parameters ---------- host: str, optional In some cases it is necessary to provide host to connect to. Passed to boto.connect_s3\n- def authenticate(self, bucket_name, credential, cache=True): Authenticates to the specified bucket using provided credentials Returns ------- bucket", "prompted_full_text": "Implement the Python class `S3Authenticator` described below.\n\nClass description:\nAuthenticator for S3 AWS\n\nMethod signatures and docstrings:\n- def __init__(self, *args, host=None, **kwargs): Parameters ---------- host: str, optional In some cases it is necessary to provide host to connect to. Passed to boto.connect_s3\n- def authenticate(self, bucket_name, credential, cache=True): Authenticates to the specified bucket using provided credentials Returns ------- bucket\n\n<|skeleton|>\nclass S3Authenticator:\n \"\"\"Authenticator for S3 AWS\"\"\"\n\n def __init__(self, *args, host=None, **kwargs):\n \"\"\"Parameters ---------- host: str, optional In some cases it is necessary to provide host to connect to. Passed to boto.connect_s3\"\"\"\n <|body_0|>\n\n def authenticate(self, bucket_name, credential, cache=True):\n \"\"\"Authenticates to the specified bucket using provided credentials Returns ------- bucket\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(S3Authenticator, self).__init__(*args, **kwargs)\n self.connection = None\n self.bucket = None\n self._conn_kwargs = {}\n if host:\n self._conn_kwargs['host'] = host\n<|end_body_0|>\n\n<|body_start_1|>\n if not boto:\n raise RuntimeError('%s requires boto module which is N/A' % self)\n boto_lgr.setLevel(logging.CRITICAL if lgr.getEffectiveLevel() > 1 else logging.DEBUG)\n conn_kwargs = self._conn_kwargs.copy()\n if bucket_name.lower() != bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n if credential is not None:\n credentials = credential()\n conn_kind = 'with authentication'\n conn_args = [credentials['key_id'], credentials['secret_id']]\n conn_kwargs['security_token'] = credentials.get('session')\n else:\n conn_kind = 'anonymously'\n conn_args = []\n conn_kwargs['anon'] = True\n if '.' in bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n lgr.info('S3 session: Connecting to the bucket %s %s', bucket_name, conn_kind)\n self.connection = conn = boto.connect_s3(*conn_args, **conn_kwargs)\n self.bucket = bucket = get_bucket(conn, bucket_name)\n return bucket\n<|end_body_1|>\n", "revision_id": "fa34dbcbb6da962fa343866c907de6414f4dde53", "skeleton": "<|skeleton|>\nclass S3Authenticator:\n \"\"\"Authenticator for S3 AWS\"\"\"\n\n def __init__(self, *args, host=None, **kwargs):\n \"\"\"Parameters ---------- host: str, optional In some cases it is necessary to provide host to connect to. Passed to boto.connect_s3\"\"\"\n <|body_0|>\n\n def authenticate(self, bucket_name, credential, cache=True):\n \"\"\"Authenticates to the specified bucket using provided credentials Returns ------- bucket\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class S3Authenticator:\n \"\"\"Authenticator for S3 AWS\"\"\"\n\n def __init__(self, *args, host=None, **kwargs):\n \"\"\"Parameters ---------- host: str, optional In some cases it is necessary to provide host to connect to. Passed to boto.connect_s3\"\"\"\n super(S3Authenticator, self).__init__(*args, **kwargs)\n self.connection = None\n self.bucket = None\n self._conn_kwargs = {}\n if host:\n self._conn_kwargs['host'] = host\n\n def authenticate(self, bucket_name, credential, cache=True):\n \"\"\"Authenticates to the specified bucket using provided credentials Returns ------- bucket\"\"\"\n if not boto:\n raise RuntimeError('%s requires boto module which is N/A' % self)\n boto_lgr.setLevel(logging.CRITICAL if lgr.getEffectiveLevel() > 1 else logging.DEBUG)\n conn_kwargs = self._conn_kwargs.copy()\n if bucket_name.lower() != bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n if credential is not None:\n credentials = credential()\n conn_kind = 'with authentication'\n conn_args = [credentials['key_id'], credentials['secret_id']]\n conn_kwargs['security_token'] = credentials.get('session')\n else:\n conn_kind = 'anonymously'\n conn_args = []\n conn_kwargs['anon'] = True\n if '.' in bucket_name:\n conn_kwargs['calling_format'] = OrdinaryCallingFormat()\n lgr.info('S3 session: Connecting to the bucket %s %s', bucket_name, conn_kind)\n self.connection = conn = boto.connect_s3(*conn_args, **conn_kwargs)\n self.bucket = bucket = get_bucket(conn, bucket_name)\n return bucket\n", "source": "the_stack_v2_python_sparse", "source_path": "datalad/downloaders/s3.py", "source_repo": "kyleam/datalad", "split": "test", "star_events_count": 1} {"blob_id": "19182befadeb12399c27ee439264e4cfe86995a1", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn Filter()", "from .filter_group import FilterGroup\nfrom .filter_group import FilterGroup\nfields: Dict[str, Callable[[Any], None]] = {'categoryFilterGroups': lambda n: setattr(self, 'category_filter_groups', n.get_collection_of_object_values(FilterGroup)), 'groups': lambda n: setattr(self, 'groups', n.get_collection_of_object_values(FilterGroup)), 'inputFilterGroups': lambda n: setattr(self, 'input_filter_groups', n.get_collection_of_object_values(FilterGroup)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value())}\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nwriter.write_collection_of_object_values('categoryFilterGroups', self.category_filter_groups)\nwriter.write_collection_of_object_values('groups', self.groups)\nwriter.write_collection_of_object_values('inputFilterGroups', self.input_filter_groups)\nwriter.write_str_value('@odata.type', self.odata_type)\nwriter.write_additional_data_value(self.additional_data)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return Filter()\n<|end_body_0|>\n\n<|body_start_1|>\n from .filter_group import FilterGroup\n from .filter_group import FilterGroup\n fields: Dict[str, Callable[[Any], None]] = {'categoryFilterGroups': lambda n: setattr(self, 'category_filter_groups', n.get_collection_of_object_values(FilterGroup)), 'groups': lambda n: setattr(self, 'groups', n.get_collection_of_object_values(FilterGroup)), 'inputFilterGroups': lambda n: setattr(self, 'input_filter_groups', n.get_collection_of_object_values(FilterGroup)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_collection_of_object_values('categoryFilterGroups', self.category_filter_groups)\n writer.write_collection_of_object_values('groups', self.groups)\n writer.write_collection_of_object_values('inputFilterGroups', self.input_filter_groups)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Filter", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Filter:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Filter:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Filter\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return Filter()\n<|end_body_0|>\n\n<|body_start_1|>\n from .filter_group import FilterGroup\n from .filter_group import FilterGroup\n fields: Dict[str, Callable[[Any], None]] = {'categoryFilterGroups': lambda n: setattr(self, 'category_filter_groups', n.get_collection_of_object_values(FilterGroup)), 'groups': lambda n: setattr(self, 'groups', n.get_collection_of_object_values(FilterGroup)), 'inputFilterGroups': lambda n: setattr(self, 'input_filter_groups', n.get_collection_of_object_values(FilterGroup)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_collection_of_object_values('categoryFilterGroups', self.category_filter_groups)\n writer.write_collection_of_object_values('groups', self.groups)\n writer.write_collection_of_object_values('inputFilterGroups', self.input_filter_groups)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000042", "length_bytes": 4333, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Filter", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Filter"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_001582", "prompt": "Implement the Python class `Filter` described below.\n\nClass description:\nImplement the Filter class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Filter: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Filter\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `Filter` described below.\n\nClass description:\nImplement the Filter class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Filter: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Filter\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass Filter:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Filter:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Filter\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return Filter()\n<|end_body_0|>\n\n<|body_start_1|>\n from .filter_group import FilterGroup\n from .filter_group import FilterGroup\n fields: Dict[str, Callable[[Any], None]] = {'categoryFilterGroups': lambda n: setattr(self, 'category_filter_groups', n.get_collection_of_object_values(FilterGroup)), 'groups': lambda n: setattr(self, 'groups', n.get_collection_of_object_values(FilterGroup)), 'inputFilterGroups': lambda n: setattr(self, 'input_filter_groups', n.get_collection_of_object_values(FilterGroup)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value())}\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_collection_of_object_values('categoryFilterGroups', self.category_filter_groups)\n writer.write_collection_of_object_values('groups', self.groups)\n writer.write_collection_of_object_values('inputFilterGroups', self.input_filter_groups)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_additional_data_value(self.additional_data)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass Filter:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Filter:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Filter\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Filter:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> Filter:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: Filter\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return Filter()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .filter_group import FilterGroup\n from .filter_group import FilterGroup\n fields: Dict[str, Callable[[Any], None]] = {'categoryFilterGroups': lambda n: setattr(self, 'category_filter_groups', n.get_collection_of_object_values(FilterGroup)), 'groups': lambda n: setattr(self, 'groups', n.get_collection_of_object_values(FilterGroup)), 'inputFilterGroups': lambda n: setattr(self, 'input_filter_groups', n.get_collection_of_object_values(FilterGroup)), '@odata.type': lambda n: setattr(self, 'odata_type', n.get_str_value())}\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n writer.write_collection_of_object_values('categoryFilterGroups', self.category_filter_groups)\n writer.write_collection_of_object_values('groups', self.groups)\n writer.write_collection_of_object_values('inputFilterGroups', self.input_filter_groups)\n writer.write_str_value('@odata.type', self.odata_type)\n writer.write_additional_data_value(self.additional_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/filter.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "d43ab899fdb378e7fd113a5f8f2504d4f1904d6f", "bodies": ["resp = self.client.get('/nchan/posts/')\nself.assertEqual(resp.status_code, 200)\nself.assertEqual(resp.data['count'], 0)", "b1 = Board.objects.create(board='01', title='test-board-01')\nPost.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\nPost.objects.create(title='second post', board=Board.objects.get(pk=b1.id), poster='friendly-frogs', text='sed do eiusmod tempor incididunt ut labore et dolore magna aliqua')\nresp = self.client.get('/nchan/posts/')\nself.assertEqual(resp.data['count'], 2)\nself.assertIn('first post', str(resp.data['results']))\nself.assertIn('second post', str(resp.data['results']))", "b1 = Board.objects.create(board='01', title='test-board-01')\np1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\nresp = self.client.get(f'/nchan/posts/{p1.id}/')\nself.assertEqual(resp.data['title'], 'first post')", "b1 = Board.objects.create(board='01', title='test-board-01')\np1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\nComment.objects.create(post=Post.objects.get(pk=p1.id), commenter='glossy-gorillas', text='URL namespace \"admin\" isn\"t unique. You may not beable to reverse all URLs in this namespace')\nresp = self.client.get(f'/nchan/posts/{p1.id}/comments/')\nself.assertIn('glossy-gorillas', str(resp.data))"], "bodies_text": "<|body_start_0|>\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data['count'], 0)\n<|end_body_0|>\n\n<|body_start_1|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Post.objects.create(title='second post', board=Board.objects.get(pk=b1.id), poster='friendly-frogs', text='sed do eiusmod tempor incididunt ut labore et dolore magna aliqua')\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.data['count'], 2)\n self.assertIn('first post', str(resp.data['results']))\n self.assertIn('second post', str(resp.data['results']))\n<|end_body_1|>\n\n<|body_start_2|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n resp = self.client.get(f'/nchan/posts/{p1.id}/')\n self.assertEqual(resp.data['title'], 'first post')\n<|end_body_2|>\n\n<|body_start_3|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Comment.objects.create(post=Post.objects.get(pk=p1.id), commenter='glossy-gorillas', text='URL namespace \"admin\" isn\"t unique. You may not beable to reverse all URLs in this namespace')\n resp = self.client.get(f'/nchan/posts/{p1.id}/comments/')\n self.assertIn('glossy-gorillas', str(resp.data))\n<|end_body_3|>\n", "class_docstring": "", "class_name": "PostsTests", "detected_licenses": ["MIT", "Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PostsTests:\n\n def test_no_posts(self):\n \"\"\"Tests if the 'posts' endpoint is responding and if the response initially contains no posts\"\"\"\n <|body_0|>\n\n def test_returns_all_existing_posts(self):\n \"\"\"Tests if 'posts' endpoint returns created posts\"\"\"\n <|body_1|>\n\n def test_returns_single_post(self):\n \"\"\"Tests if 'posts//' endpoint returns the correct post\"\"\"\n <|body_2|>\n\n def test_returns_post_comments(self):\n \"\"\"Tests if 'posts//comments/' endpoint returns correct comments\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data['count'], 0)\n<|end_body_0|>\n\n<|body_start_1|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Post.objects.create(title='second post', board=Board.objects.get(pk=b1.id), poster='friendly-frogs', text='sed do eiusmod tempor incididunt ut labore et dolore magna aliqua')\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.data['count'], 2)\n self.assertIn('first post', str(resp.data['results']))\n self.assertIn('second post', str(resp.data['results']))\n<|end_body_1|>\n\n<|body_start_2|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n resp = self.client.get(f'/nchan/posts/{p1.id}/')\n self.assertEqual(resp.data['title'], 'first post')\n<|end_body_2|>\n\n<|body_start_3|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Comment.objects.create(post=Post.objects.get(pk=p1.id), commenter='glossy-gorillas', text='URL namespace \"admin\" isn\"t unique. You may not beable to reverse all URLs in this namespace')\n resp = self.client.get(f'/nchan/posts/{p1.id}/comments/')\n self.assertIn('glossy-gorillas', str(resp.data))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000043", "length_bytes": 5883, "license_type": "permissive", "methods": [{"docstring": "Tests if the 'posts' endpoint is responding and if the response initially contains no posts", "name": "test_no_posts", "signature": "def test_no_posts(self)"}, {"docstring": "Tests if 'posts' endpoint returns created posts", "name": "test_returns_all_existing_posts", "signature": "def test_returns_all_existing_posts(self)"}, {"docstring": "Tests if 'posts//' endpoint returns the correct post", "name": "test_returns_single_post", "signature": "def test_returns_single_post(self)"}, {"docstring": "Tests if 'posts//comments/' endpoint returns correct comments", "name": "test_returns_post_comments", "signature": "def test_returns_post_comments(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_027549", "prompt": "Implement the Python class `PostsTests` described below.\n\nClass description:\nImplement the PostsTests class.\n\nMethod signatures and docstrings:\n- def test_no_posts(self): Tests if the 'posts' endpoint is responding and if the response initially contains no posts\n- def test_returns_all_existing_posts(self): Tests if 'posts' endpoint returns created posts\n- def test_returns_single_post(self): Tests if 'posts//' endpoint returns the correct post\n- def test_returns_post_comments(self): Tests if 'posts//comments/' endpoint returns correct comments", "prompted_full_text": "Implement the Python class `PostsTests` described below.\n\nClass description:\nImplement the PostsTests class.\n\nMethod signatures and docstrings:\n- def test_no_posts(self): Tests if the 'posts' endpoint is responding and if the response initially contains no posts\n- def test_returns_all_existing_posts(self): Tests if 'posts' endpoint returns created posts\n- def test_returns_single_post(self): Tests if 'posts//' endpoint returns the correct post\n- def test_returns_post_comments(self): Tests if 'posts//comments/' endpoint returns correct comments\n\n<|skeleton|>\nclass PostsTests:\n\n def test_no_posts(self):\n \"\"\"Tests if the 'posts' endpoint is responding and if the response initially contains no posts\"\"\"\n <|body_0|>\n\n def test_returns_all_existing_posts(self):\n \"\"\"Tests if 'posts' endpoint returns created posts\"\"\"\n <|body_1|>\n\n def test_returns_single_post(self):\n \"\"\"Tests if 'posts//' endpoint returns the correct post\"\"\"\n <|body_2|>\n\n def test_returns_post_comments(self):\n \"\"\"Tests if 'posts//comments/' endpoint returns correct comments\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data['count'], 0)\n<|end_body_0|>\n\n<|body_start_1|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Post.objects.create(title='second post', board=Board.objects.get(pk=b1.id), poster='friendly-frogs', text='sed do eiusmod tempor incididunt ut labore et dolore magna aliqua')\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.data['count'], 2)\n self.assertIn('first post', str(resp.data['results']))\n self.assertIn('second post', str(resp.data['results']))\n<|end_body_1|>\n\n<|body_start_2|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n resp = self.client.get(f'/nchan/posts/{p1.id}/')\n self.assertEqual(resp.data['title'], 'first post')\n<|end_body_2|>\n\n<|body_start_3|>\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Comment.objects.create(post=Post.objects.get(pk=p1.id), commenter='glossy-gorillas', text='URL namespace \"admin\" isn\"t unique. You may not beable to reverse all URLs in this namespace')\n resp = self.client.get(f'/nchan/posts/{p1.id}/comments/')\n self.assertIn('glossy-gorillas', str(resp.data))\n<|end_body_3|>\n", "revision_id": "9d46c6a9e6ccfae1b9ab5db1b6bf2a6b0abe4c10", "skeleton": "<|skeleton|>\nclass PostsTests:\n\n def test_no_posts(self):\n \"\"\"Tests if the 'posts' endpoint is responding and if the response initially contains no posts\"\"\"\n <|body_0|>\n\n def test_returns_all_existing_posts(self):\n \"\"\"Tests if 'posts' endpoint returns created posts\"\"\"\n <|body_1|>\n\n def test_returns_single_post(self):\n \"\"\"Tests if 'posts//' endpoint returns the correct post\"\"\"\n <|body_2|>\n\n def test_returns_post_comments(self):\n \"\"\"Tests if 'posts//comments/' endpoint returns correct comments\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PostsTests:\n def test_no_posts(self):\n \"\"\"Tests if the 'posts' endpoint is responding and if the response initially contains no posts\"\"\"\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.data['count'], 0)\n\n def test_returns_all_existing_posts(self):\n \"\"\"Tests if 'posts' endpoint returns created posts\"\"\"\n b1 = Board.objects.create(board='01', title='test-board-01')\n Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Post.objects.create(title='second post', board=Board.objects.get(pk=b1.id), poster='friendly-frogs', text='sed do eiusmod tempor incididunt ut labore et dolore magna aliqua')\n resp = self.client.get('/nchan/posts/')\n self.assertEqual(resp.data['count'], 2)\n self.assertIn('first post', str(resp.data['results']))\n self.assertIn('second post', str(resp.data['results']))\n\n def test_returns_single_post(self):\n \"\"\"Tests if 'posts//' endpoint returns the correct post\"\"\"\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n resp = self.client.get(f'/nchan/posts/{p1.id}/')\n self.assertEqual(resp.data['title'], 'first post')\n\n def test_returns_post_comments(self):\n \"\"\"Tests if 'posts//comments/' endpoint returns correct comments\"\"\"\n b1 = Board.objects.create(board='01', title='test-board-01')\n p1 = Post.objects.create(title='first post', board=Board.objects.get(pk=b1.id), poster='festive-ferret', text='Lorem ipsum dolor sit amet, consectetur adipiscing elit')\n Comment.objects.create(post=Post.objects.get(pk=p1.id), commenter='glossy-gorillas', text='URL namespace \"admin\" isn\"t unique. You may not beable to reverse all URLs in this namespace')\n resp = self.client.get(f'/nchan/posts/{p1.id}/comments/')\n self.assertIn('glossy-gorillas', str(resp.data))\n", "source": "the_stack_v2_python_sparse", "source_path": "festive-ferrets/backend/nchan/tests.py", "source_repo": "whywhyy/summer-code-jam-2020", "split": "test", "star_events_count": 2} {"blob_id": "42b7a0649aa3139970e87a4a25ab0400c011e289", "bodies": ["query = {}\nif related_rule_id:\n query['rule__id'] = related_rule_id\nqueryset = ScheduledTask.filter(**query).prefetch_related('rule').offset(offset).limit(limit).order_by('-created_at')\nreturn await ScheduledTask_Pydantic.from_queryset(queryset)", "task = await ScheduledTask.get(id=task_id)\nif task is None:\n raise Exception('Task id not found: %s' % task_id)\nawait task.delete()"], "bodies_text": "<|body_start_0|>\n query = {}\n if related_rule_id:\n query['rule__id'] = related_rule_id\n queryset = ScheduledTask.filter(**query).prefetch_related('rule').offset(offset).limit(limit).order_by('-created_at')\n return await ScheduledTask_Pydantic.from_queryset(queryset)\n<|end_body_0|>\n\n<|body_start_1|>\n task = await ScheduledTask.get(id=task_id)\n if task is None:\n raise Exception('Task id not found: %s' % task_id)\n await task.delete()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ScheduledTaskRepository", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ScheduledTaskRepository:\n\n async def get(offset: int, limit: int, related_rule_id: int=None) -> List[ScheduledTask]:\n \"\"\"Get the list of scheduled tasks. :return: list of scheduled tasks\"\"\"\n <|body_0|>\n\n async def delete(task_id):\n \"\"\"Delete a scheduled task. :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = {}\n if related_rule_id:\n query['rule__id'] = related_rule_id\n queryset = ScheduledTask.filter(**query).prefetch_related('rule').offset(offset).limit(limit).order_by('-created_at')\n return await ScheduledTask_Pydantic.from_queryset(queryset)\n<|end_body_0|>\n\n<|body_start_1|>\n task = await ScheduledTask.get(id=task_id)\n if task is None:\n raise Exception('Task id not found: %s' % task_id)\n await task.delete()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000044", "length_bytes": 1095, "license_type": "permissive", "methods": [{"docstring": "Get the list of scheduled tasks. :return: list of scheduled tasks", "name": "get", "signature": "async def get(offset: int, limit: int, related_rule_id: int=None) -> List[ScheduledTask]"}, {"docstring": "Delete a scheduled task. :return: None", "name": "delete", "signature": "async def delete(task_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037606", "prompt": "Implement the Python class `ScheduledTaskRepository` described below.\n\nClass description:\nImplement the ScheduledTaskRepository class.\n\nMethod signatures and docstrings:\n- async def get(offset: int, limit: int, related_rule_id: int=None) -> List[ScheduledTask]: Get the list of scheduled tasks. :return: list of scheduled tasks\n- async def delete(task_id): Delete a scheduled task. :return: None", "prompted_full_text": "Implement the Python class `ScheduledTaskRepository` described below.\n\nClass description:\nImplement the ScheduledTaskRepository class.\n\nMethod signatures and docstrings:\n- async def get(offset: int, limit: int, related_rule_id: int=None) -> List[ScheduledTask]: Get the list of scheduled tasks. :return: list of scheduled tasks\n- async def delete(task_id): Delete a scheduled task. :return: None\n\n<|skeleton|>\nclass ScheduledTaskRepository:\n\n async def get(offset: int, limit: int, related_rule_id: int=None) -> List[ScheduledTask]:\n \"\"\"Get the list of scheduled tasks. :return: list of scheduled tasks\"\"\"\n <|body_0|>\n\n async def delete(task_id):\n \"\"\"Delete a scheduled task. :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n query = {}\n if related_rule_id:\n query['rule__id'] = related_rule_id\n queryset = ScheduledTask.filter(**query).prefetch_related('rule').offset(offset).limit(limit).order_by('-created_at')\n return await ScheduledTask_Pydantic.from_queryset(queryset)\n<|end_body_0|>\n\n<|body_start_1|>\n task = await ScheduledTask.get(id=task_id)\n if task is None:\n raise Exception('Task id not found: %s' % task_id)\n await task.delete()\n<|end_body_1|>\n", "revision_id": "ac3a15014ad3c3bdac523a6550934a06653cfba1", "skeleton": "<|skeleton|>\nclass ScheduledTaskRepository:\n\n async def get(offset: int, limit: int, related_rule_id: int=None) -> List[ScheduledTask]:\n \"\"\"Get the list of scheduled tasks. :return: list of scheduled tasks\"\"\"\n <|body_0|>\n\n async def delete(task_id):\n \"\"\"Delete a scheduled task. :return: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ScheduledTaskRepository:\n async def get(offset: int, limit: int, related_rule_id: int=None) -> List[ScheduledTask]:\n \"\"\"Get the list of scheduled tasks. :return: list of scheduled tasks\"\"\"\n query = {}\n if related_rule_id:\n query['rule__id'] = related_rule_id\n queryset = ScheduledTask.filter(**query).prefetch_related('rule').offset(offset).limit(limit).order_by('-created_at')\n return await ScheduledTask_Pydantic.from_queryset(queryset)\n\n async def delete(task_id):\n \"\"\"Delete a scheduled task. :return: None\"\"\"\n task = await ScheduledTask.get(id=task_id)\n if task is None:\n raise Exception('Task id not found: %s' % task_id)\n await task.delete()\n", "source": "the_stack_v2_python_sparse", "source_path": "packages/task-scheduler/task_scheduler/repositories/scheduled_task_handler.py", "source_repo": "matiasbavera/romi-dashboard", "split": "test", "star_events_count": 0} {"blob_id": "3806a11d56371d076c8d4caa9714ac5f41954241", "bodies": ["directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\nm, n = (len(board), len(board[0]))\nseen = set()\n\ndef dfs(x, y):\n if x < 0 or y < 0 or x >= m or (y >= n) or (board[x][y] == 'X') or ((x, y) in seen):\n return\n seen.add((x, y))\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n dfs(nx, ny)\nfor i in range(n):\n dfs(0, i)\n dfs(m - 1, i)\nfor i in range(m):\n dfs(i, 0)\n dfs(i, n - 1)\nfor i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'", "directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\nm, n = (len(board), len(board[0]))\nseen = set()\n\ndef bfs(x, y):\n if board[x][y] == 'X':\n return\n queue = [(x, y)]\n seen.add((x, y))\n while queue:\n x, y = queue.pop(0)\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n if nx < 0 or ny < 0 or nx >= m or (ny >= n) or (board[nx][ny] == 'X') or ((nx, ny) in seen):\n continue\n seen.add((nx, ny))\n queue.append((nx, ny))\nfor i in range(n):\n bfs(0, i)\n bfs(m - 1, i)\nfor i in range(m):\n bfs(i, 0)\n bfs(i, n - 1)\nfor i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'"], "bodies_text": "<|body_start_0|>\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def dfs(x, y):\n if x < 0 or y < 0 or x >= m or (y >= n) or (board[x][y] == 'X') or ((x, y) in seen):\n return\n seen.add((x, y))\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n dfs(nx, ny)\n for i in range(n):\n dfs(0, i)\n dfs(m - 1, i)\n for i in range(m):\n dfs(i, 0)\n dfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n<|end_body_0|>\n\n<|body_start_1|>\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def bfs(x, y):\n if board[x][y] == 'X':\n return\n queue = [(x, y)]\n seen.add((x, y))\n while queue:\n x, y = queue.pop(0)\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n if nx < 0 or ny < 0 or nx >= m or (ny >= n) or (board[nx][ny] == 'X') or ((nx, ny) in seen):\n continue\n seen.add((nx, ny))\n queue.append((nx, ny))\n for i in range(n):\n bfs(0, i)\n bfs(m - 1, i)\n for i in range(m):\n bfs(i, 0)\n bfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution130", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution130:\n\n def solve_dfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_0|>\n\n def solve_bfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def dfs(x, y):\n if x < 0 or y < 0 or x >= m or (y >= n) or (board[x][y] == 'X') or ((x, y) in seen):\n return\n seen.add((x, y))\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n dfs(nx, ny)\n for i in range(n):\n dfs(0, i)\n dfs(m - 1, i)\n for i in range(m):\n dfs(i, 0)\n dfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n<|end_body_0|>\n\n<|body_start_1|>\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def bfs(x, y):\n if board[x][y] == 'X':\n return\n queue = [(x, y)]\n seen.add((x, y))\n while queue:\n x, y = queue.pop(0)\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n if nx < 0 or ny < 0 or nx >= m or (ny >= n) or (board[nx][ny] == 'X') or ((nx, ny) in seen):\n continue\n seen.add((nx, ny))\n queue.append((nx, ny))\n for i in range(n):\n bfs(0, i)\n bfs(m - 1, i)\n for i in range(m):\n bfs(i, 0)\n bfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000045", "length_bytes": 35771, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify board in-place instead.", "name": "solve_dfs", "signature": "def solve_dfs(self, board: List[List[str]]) -> None"}, {"docstring": "Do not return anything, modify board in-place instead.", "name": "solve_bfs", "signature": "def solve_bfs(self, board: List[List[str]]) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039829", "prompt": "Implement the Python class `Solution130` described below.\n\nClass description:\nImplement the Solution130 class.\n\nMethod signatures and docstrings:\n- def solve_dfs(self, board: List[List[str]]) -> None: Do not return anything, modify board in-place instead.\n- def solve_bfs(self, board: List[List[str]]) -> None: Do not return anything, modify board in-place instead.", "prompted_full_text": "Implement the Python class `Solution130` described below.\n\nClass description:\nImplement the Solution130 class.\n\nMethod signatures and docstrings:\n- def solve_dfs(self, board: List[List[str]]) -> None: Do not return anything, modify board in-place instead.\n- def solve_bfs(self, board: List[List[str]]) -> None: Do not return anything, modify board in-place instead.\n\n<|skeleton|>\nclass Solution130:\n\n def solve_dfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_0|>\n\n def solve_bfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def dfs(x, y):\n if x < 0 or y < 0 or x >= m or (y >= n) or (board[x][y] == 'X') or ((x, y) in seen):\n return\n seen.add((x, y))\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n dfs(nx, ny)\n for i in range(n):\n dfs(0, i)\n dfs(m - 1, i)\n for i in range(m):\n dfs(i, 0)\n dfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n<|end_body_0|>\n\n<|body_start_1|>\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def bfs(x, y):\n if board[x][y] == 'X':\n return\n queue = [(x, y)]\n seen.add((x, y))\n while queue:\n x, y = queue.pop(0)\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n if nx < 0 or ny < 0 or nx >= m or (ny >= n) or (board[nx][ny] == 'X') or ((nx, ny) in seen):\n continue\n seen.add((nx, ny))\n queue.append((nx, ny))\n for i in range(n):\n bfs(0, i)\n bfs(m - 1, i)\n for i in range(m):\n bfs(i, 0)\n bfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n<|end_body_1|>\n", "revision_id": "dca40686c6a280bd394feb8e6e78d40eecf854b9", "skeleton": "<|skeleton|>\nclass Solution130:\n\n def solve_dfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_0|>\n\n def solve_bfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution130:\n def solve_dfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def dfs(x, y):\n if x < 0 or y < 0 or x >= m or (y >= n) or (board[x][y] == 'X') or ((x, y) in seen):\n return\n seen.add((x, y))\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n dfs(nx, ny)\n for i in range(n):\n dfs(0, i)\n dfs(m - 1, i)\n for i in range(m):\n dfs(i, 0)\n dfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n\n def solve_bfs(self, board: List[List[str]]) -> None:\n \"\"\"Do not return anything, modify board in-place instead.\"\"\"\n directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]\n m, n = (len(board), len(board[0]))\n seen = set()\n\n def bfs(x, y):\n if board[x][y] == 'X':\n return\n queue = [(x, y)]\n seen.add((x, y))\n while queue:\n x, y = queue.pop(0)\n for dx, dy in directions:\n nx, ny = (x + dx, y + dy)\n if nx < 0 or ny < 0 or nx >= m or (ny >= n) or (board[nx][ny] == 'X') or ((nx, ny) in seen):\n continue\n seen.add((nx, ny))\n queue.append((nx, ny))\n for i in range(n):\n bfs(0, i)\n bfs(m - 1, i)\n for i in range(m):\n bfs(i, 0)\n bfs(i, n - 1)\n for i in range(m):\n for j in range(n):\n board[i][j] = 'X' if (i, j) not in seen else 'O'\n", "source": "the_stack_v2_python_sparse", "source_path": "src/data_structure/search/dfs_bfs_graph_traverse.py", "source_repo": "1325052669/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "940f96073b4be6f1cd26d60d650d699e43d1e099", "bodies": ["for obj in queryset:\n obj.is_tracked = not obj.is_tracked\n if not obj.is_tracked:\n obj.is_visible = False\n obj.save()", "for obj in queryset:\n obj.is_visible = not obj.is_visible\n if obj.is_visible:\n obj.is_tracked = True\n obj.save()"], "bodies_text": "<|body_start_0|>\n for obj in queryset:\n obj.is_tracked = not obj.is_tracked\n if not obj.is_tracked:\n obj.is_visible = False\n obj.save()\n<|end_body_0|>\n\n<|body_start_1|>\n for obj in queryset:\n obj.is_visible = not obj.is_visible\n if obj.is_visible:\n obj.is_tracked = True\n obj.save()\n<|end_body_1|>\n", "class_docstring": "A base ModelAdmin class.", "class_name": "ModelAdmin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModelAdmin:\n \"\"\"A base ModelAdmin class.\"\"\"\n\n def change_tracking(self, request, queryset):\n \"\"\"Inverse tracking of the object.\"\"\"\n <|body_0|>\n\n def change_visibility(self, request, queryset):\n \"\"\"Inverse visibility of the object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for obj in queryset:\n obj.is_tracked = not obj.is_tracked\n if not obj.is_tracked:\n obj.is_visible = False\n obj.save()\n<|end_body_0|>\n\n<|body_start_1|>\n for obj in queryset:\n obj.is_visible = not obj.is_visible\n if obj.is_visible:\n obj.is_tracked = True\n obj.save()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000046", "length_bytes": 939, "license_type": "no_license", "methods": [{"docstring": "Inverse tracking of the object.", "name": "change_tracking", "signature": "def change_tracking(self, request, queryset)"}, {"docstring": "Inverse visibility of the object.", "name": "change_visibility", "signature": "def change_visibility(self, request, queryset)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000053", "prompt": "Implement the Python class `ModelAdmin` described below.\n\nClass description:\nA base ModelAdmin class.\n\nMethod signatures and docstrings:\n- def change_tracking(self, request, queryset): Inverse tracking of the object.\n- def change_visibility(self, request, queryset): Inverse visibility of the object.", "prompted_full_text": "Implement the Python class `ModelAdmin` described below.\n\nClass description:\nA base ModelAdmin class.\n\nMethod signatures and docstrings:\n- def change_tracking(self, request, queryset): Inverse tracking of the object.\n- def change_visibility(self, request, queryset): Inverse visibility of the object.\n\n<|skeleton|>\nclass ModelAdmin:\n \"\"\"A base ModelAdmin class.\"\"\"\n\n def change_tracking(self, request, queryset):\n \"\"\"Inverse tracking of the object.\"\"\"\n <|body_0|>\n\n def change_visibility(self, request, queryset):\n \"\"\"Inverse visibility of the object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for obj in queryset:\n obj.is_tracked = not obj.is_tracked\n if not obj.is_tracked:\n obj.is_visible = False\n obj.save()\n<|end_body_0|>\n\n<|body_start_1|>\n for obj in queryset:\n obj.is_visible = not obj.is_visible\n if obj.is_visible:\n obj.is_tracked = True\n obj.save()\n<|end_body_1|>\n", "revision_id": "606cb7d86c2ec0006e060de7b118323a2a9317d1", "skeleton": "<|skeleton|>\nclass ModelAdmin:\n \"\"\"A base ModelAdmin class.\"\"\"\n\n def change_tracking(self, request, queryset):\n \"\"\"Inverse tracking of the object.\"\"\"\n <|body_0|>\n\n def change_visibility(self, request, queryset):\n \"\"\"Inverse visibility of the object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModelAdmin:\n \"\"\"A base ModelAdmin class.\"\"\"\n\n def change_tracking(self, request, queryset):\n \"\"\"Inverse tracking of the object.\"\"\"\n for obj in queryset:\n obj.is_tracked = not obj.is_tracked\n if not obj.is_tracked:\n obj.is_visible = False\n obj.save()\n\n def change_visibility(self, request, queryset):\n \"\"\"Inverse visibility of the object.\"\"\"\n for obj in queryset:\n obj.is_visible = not obj.is_visible\n if obj.is_visible:\n obj.is_tracked = True\n obj.save()\n", "source": "the_stack_v2_python_sparse", "source_path": "contributors/admin/base.py", "source_repo": "slavarobotam/hexlet-friends", "split": "test", "star_events_count": 0} {"blob_id": "16b4cab1284c48215977119e562aac0eb365399f", "bodies": ["if layer not in [0, 1]:\n raise NotImplementedError(f'Layer {layer} is not supported for Color256 objects! Please choose from 0, 1')\nself.names = {'black': 0, 'red': 1, 'green': 2, 'yellow': 3, 'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7}\nself.layer_offset = layer * 10", "if color.startswith('#'):\n color = color[1:]\nrgb = []\nfor i in (0, 2, 4):\n color_hex = color[i:i + 2]\n rgb.append(int(color_hex, 16))\nreturn (rgb[0], rgb[1], rgb[2])", "if isinstance(color, str) and all((char in hexdigits or char == '#' for char in color)):\n try:\n color = self.translate_hex(color)\n except ValueError:\n pass\nif color in self.names:\n new = self.names[str(color)]\n assert isinstance(new, int)\n color = new\nif isinstance(color, tuple):\n red, green, blue = color\n color_value = f'2;{red};{green};{blue}m'\nelif isinstance(color, int) or color.isdigit():\n color_value = f'5;{color}m'\nelse:\n raise NotImplementedError(f'Not sure what to do with {color} of type {type(color)}')\nreturn f'\\x1b[{38 + self.layer_offset};' + color_value + text + (set_mode('reset') if reset_color else '')"], "bodies_text": "<|body_start_0|>\n if layer not in [0, 1]:\n raise NotImplementedError(f'Layer {layer} is not supported for Color256 objects! Please choose from 0, 1')\n self.names = {'black': 0, 'red': 1, 'green': 2, 'yellow': 3, 'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7}\n self.layer_offset = layer * 10\n<|end_body_0|>\n\n<|body_start_1|>\n if color.startswith('#'):\n color = color[1:]\n rgb = []\n for i in (0, 2, 4):\n color_hex = color[i:i + 2]\n rgb.append(int(color_hex, 16))\n return (rgb[0], rgb[1], rgb[2])\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(color, str) and all((char in hexdigits or char == '#' for char in color)):\n try:\n color = self.translate_hex(color)\n except ValueError:\n pass\n if color in self.names:\n new = self.names[str(color)]\n assert isinstance(new, int)\n color = new\n if isinstance(color, tuple):\n red, green, blue = color\n color_value = f'2;{red};{green};{blue}m'\n elif isinstance(color, int) or color.isdigit():\n color_value = f'5;{color}m'\n else:\n raise NotImplementedError(f'Not sure what to do with {color} of type {type(color)}')\n return f'\\x1b[{38 + self.layer_offset};' + color_value + text + (set_mode('reset') if reset_color else '')\n<|end_body_2|>\n", "class_docstring": "Base color object", "class_name": "_Color", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _Color:\n \"\"\"Base color object\"\"\"\n\n def __init__(self, layer: int=0) -> None:\n \"\"\"Set layer\"\"\"\n <|body_0|>\n\n def translate_hex(color: str) -> tuple[int, int, int]:\n \"\"\"Translate hex string to rgb values\"\"\"\n <|body_1|>\n\n def __call__(self, text: str, color: Union[int, str, tuple[Union[int, str], Union[int, str], Union[int, str]]], reset_color: bool=True) -> str:\n \"\"\"Return colored text with reset code at the end\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if layer not in [0, 1]:\n raise NotImplementedError(f'Layer {layer} is not supported for Color256 objects! Please choose from 0, 1')\n self.names = {'black': 0, 'red': 1, 'green': 2, 'yellow': 3, 'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7}\n self.layer_offset = layer * 10\n<|end_body_0|>\n\n<|body_start_1|>\n if color.startswith('#'):\n color = color[1:]\n rgb = []\n for i in (0, 2, 4):\n color_hex = color[i:i + 2]\n rgb.append(int(color_hex, 16))\n return (rgb[0], rgb[1], rgb[2])\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(color, str) and all((char in hexdigits or char == '#' for char in color)):\n try:\n color = self.translate_hex(color)\n except ValueError:\n pass\n if color in self.names:\n new = self.names[str(color)]\n assert isinstance(new, int)\n color = new\n if isinstance(color, tuple):\n red, green, blue = color\n color_value = f'2;{red};{green};{blue}m'\n elif isinstance(color, int) or color.isdigit():\n color_value = f'5;{color}m'\n else:\n raise NotImplementedError(f'Not sure what to do with {color} of type {type(color)}')\n return f'\\x1b[{38 + self.layer_offset};' + color_value + text + (set_mode('reset') if reset_color else '')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000047", "length_bytes": 16290, "license_type": "no_license", "methods": [{"docstring": "Set layer", "name": "__init__", "signature": "def __init__(self, layer: int=0) -> None"}, {"docstring": "Translate hex string to rgb values", "name": "translate_hex", "signature": "def translate_hex(color: str) -> tuple[int, int, int]"}, {"docstring": "Return colored text with reset code at the end", "name": "__call__", "signature": "def __call__(self, text: str, color: Union[int, str, tuple[Union[int, str], Union[int, str], Union[int, str]]], reset_color: bool=True) -> str"}], "n_methods": 3, "prompt": "Implement the Python class `_Color` described below.\n\nClass description:\nBase color object\n\nMethod signatures and docstrings:\n- def __init__(self, layer: int=0) -> None: Set layer\n- def translate_hex(color: str) -> tuple[int, int, int]: Translate hex string to rgb values\n- def __call__(self, text: str, color: Union[int, str, tuple[Union[int, str], Union[int, str], Union[int, str]]], reset_color: bool=True) -> str: Return colored text with reset code at the end", "prompted_full_text": "Implement the Python class `_Color` described below.\n\nClass description:\nBase color object\n\nMethod signatures and docstrings:\n- def __init__(self, layer: int=0) -> None: Set layer\n- def translate_hex(color: str) -> tuple[int, int, int]: Translate hex string to rgb values\n- def __call__(self, text: str, color: Union[int, str, tuple[Union[int, str], Union[int, str], Union[int, str]]], reset_color: bool=True) -> str: Return colored text with reset code at the end\n\n<|skeleton|>\nclass _Color:\n \"\"\"Base color object\"\"\"\n\n def __init__(self, layer: int=0) -> None:\n \"\"\"Set layer\"\"\"\n <|body_0|>\n\n def translate_hex(color: str) -> tuple[int, int, int]:\n \"\"\"Translate hex string to rgb values\"\"\"\n <|body_1|>\n\n def __call__(self, text: str, color: Union[int, str, tuple[Union[int, str], Union[int, str], Union[int, str]]], reset_color: bool=True) -> str:\n \"\"\"Return colored text with reset code at the end\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if layer not in [0, 1]:\n raise NotImplementedError(f'Layer {layer} is not supported for Color256 objects! Please choose from 0, 1')\n self.names = {'black': 0, 'red': 1, 'green': 2, 'yellow': 3, 'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7}\n self.layer_offset = layer * 10\n<|end_body_0|>\n\n<|body_start_1|>\n if color.startswith('#'):\n color = color[1:]\n rgb = []\n for i in (0, 2, 4):\n color_hex = color[i:i + 2]\n rgb.append(int(color_hex, 16))\n return (rgb[0], rgb[1], rgb[2])\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(color, str) and all((char in hexdigits or char == '#' for char in color)):\n try:\n color = self.translate_hex(color)\n except ValueError:\n pass\n if color in self.names:\n new = self.names[str(color)]\n assert isinstance(new, int)\n color = new\n if isinstance(color, tuple):\n red, green, blue = color\n color_value = f'2;{red};{green};{blue}m'\n elif isinstance(color, int) or color.isdigit():\n color_value = f'5;{color}m'\n else:\n raise NotImplementedError(f'Not sure what to do with {color} of type {type(color)}')\n return f'\\x1b[{38 + self.layer_offset};' + color_value + text + (set_mode('reset') if reset_color else '')\n<|end_body_2|>\n", "revision_id": "05ddaf41fd8de11c7300a8ba125eddf9e1ee1131", "skeleton": "<|skeleton|>\nclass _Color:\n \"\"\"Base color object\"\"\"\n\n def __init__(self, layer: int=0) -> None:\n \"\"\"Set layer\"\"\"\n <|body_0|>\n\n def translate_hex(color: str) -> tuple[int, int, int]:\n \"\"\"Translate hex string to rgb values\"\"\"\n <|body_1|>\n\n def __call__(self, text: str, color: Union[int, str, tuple[Union[int, str], Union[int, str], Union[int, str]]], reset_color: bool=True) -> str:\n \"\"\"Return colored text with reset code at the end\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class _Color:\n \"\"\"Base color object\"\"\"\n\n def __init__(self, layer: int=0) -> None:\n \"\"\"Set layer\"\"\"\n if layer not in [0, 1]:\n raise NotImplementedError(f'Layer {layer} is not supported for Color256 objects! Please choose from 0, 1')\n self.names = {'black': 0, 'red': 1, 'green': 2, 'yellow': 3, 'blue': 4, 'magenta': 5, 'cyan': 6, 'white': 7}\n self.layer_offset = layer * 10\n\n def translate_hex(color: str) -> tuple[int, int, int]:\n \"\"\"Translate hex string to rgb values\"\"\"\n if color.startswith('#'):\n color = color[1:]\n rgb = []\n for i in (0, 2, 4):\n color_hex = color[i:i + 2]\n rgb.append(int(color_hex, 16))\n return (rgb[0], rgb[1], rgb[2])\n\n def __call__(self, text: str, color: Union[int, str, tuple[Union[int, str], Union[int, str], Union[int, str]]], reset_color: bool=True) -> str:\n \"\"\"Return colored text with reset code at the end\"\"\"\n if isinstance(color, str) and all((char in hexdigits or char == '#' for char in color)):\n try:\n color = self.translate_hex(color)\n except ValueError:\n pass\n if color in self.names:\n new = self.names[str(color)]\n assert isinstance(new, int)\n color = new\n if isinstance(color, tuple):\n red, green, blue = color\n color_value = f'2;{red};{green};{blue}m'\n elif isinstance(color, int) or color.isdigit():\n color_value = f'5;{color}m'\n else:\n raise NotImplementedError(f'Not sure what to do with {color} of type {type(color)}')\n return f'\\x1b[{38 + self.layer_offset};' + color_value + text + (set_mode('reset') if reset_color else '')\n", "source": "the_stack_v2_python_sparse", "source_path": "pytermgui/ansi_interface.py", "source_repo": "ekapujiw2002/pytermgui", "split": "test", "star_events_count": 0} {"blob_id": "05b426e3a4ff61e8a83a41dba9babfffd47ee616", "bodies": ["if isinstance(sigma, (float, int)):\n self.sigma = lambda t: sigma\nelif callable(sigma):\n self.sigma = sigma\nif isinstance(beta, (float, int)):\n self.beta = lambda t: beta\nelif callable(beta):\n self.beta = beta\nif isinstance(ds, (float, int)):\n self.ds = lambda t: ds\nelif callable(ds):\n self.ds = ds\nif isinstance(di, (float, int)):\n self.di = lambda t: di\nelif callable(di):\n self.di = di\nif isinstance(rho, (float, int)):\n self.rho = lambda t: rho\nelif callable(rho):\n self.rho = rho\nif isinstance(alfa, (float, int)):\n self.alfa = lambda t: alfa\nelif callable(alfa):\n self.alfa = alfa\nself.S0, self.I0, self.Z0, self.R0, self.T = (S0, I0, Z0, R0, T)", "S, I, Z, R = u\nu0 = self.sigma(t) - self.beta(t) * S * Z - self.ds(t) * S\nu1 = self.beta(t) * S * Z - self.rho(t) * I - self.di(t) * I\nu2 = self.rho(t) * I - self.alfa(t) * S * Z\nu3 = self.ds(t) * S + self.di(t) * I + self.alfa(t) * S * Z\nreturn [u0, u1, u2, u3]"], "bodies_text": "<|body_start_0|>\n if isinstance(sigma, (float, int)):\n self.sigma = lambda t: sigma\n elif callable(sigma):\n self.sigma = sigma\n if isinstance(beta, (float, int)):\n self.beta = lambda t: beta\n elif callable(beta):\n self.beta = beta\n if isinstance(ds, (float, int)):\n self.ds = lambda t: ds\n elif callable(ds):\n self.ds = ds\n if isinstance(di, (float, int)):\n self.di = lambda t: di\n elif callable(di):\n self.di = di\n if isinstance(rho, (float, int)):\n self.rho = lambda t: rho\n elif callable(rho):\n self.rho = rho\n if isinstance(alfa, (float, int)):\n self.alfa = lambda t: alfa\n elif callable(alfa):\n self.alfa = alfa\n self.S0, self.I0, self.Z0, self.R0, self.T = (S0, I0, Z0, R0, T)\n<|end_body_0|>\n\n<|body_start_1|>\n S, I, Z, R = u\n u0 = self.sigma(t) - self.beta(t) * S * Z - self.ds(t) * S\n u1 = self.beta(t) * S * Z - self.rho(t) * I - self.di(t) * I\n u2 = self.rho(t) * I - self.alfa(t) * S * Z\n u3 = self.ds(t) * S + self.di(t) * I + self.alfa(t) * S * Z\n return [u0, u1, u2, u3]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ProblemSIZR", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProblemSIZR:\n\n def __init__(self, sigma, beta, ds, di, rho, alfa, S0, I0, Z0, R0, T):\n \"\"\"nu, beta: parametere i ODE-systemet S0,I0,R0 = init verdier T: Simulering for t i [0,T]\"\"\"\n <|body_0|>\n\n def __call__(self, u, t):\n \"\"\"Høyresiden(e) i ODE-systemet\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(sigma, (float, int)):\n self.sigma = lambda t: sigma\n elif callable(sigma):\n self.sigma = sigma\n if isinstance(beta, (float, int)):\n self.beta = lambda t: beta\n elif callable(beta):\n self.beta = beta\n if isinstance(ds, (float, int)):\n self.ds = lambda t: ds\n elif callable(ds):\n self.ds = ds\n if isinstance(di, (float, int)):\n self.di = lambda t: di\n elif callable(di):\n self.di = di\n if isinstance(rho, (float, int)):\n self.rho = lambda t: rho\n elif callable(rho):\n self.rho = rho\n if isinstance(alfa, (float, int)):\n self.alfa = lambda t: alfa\n elif callable(alfa):\n self.alfa = alfa\n self.S0, self.I0, self.Z0, self.R0, self.T = (S0, I0, Z0, R0, T)\n<|end_body_0|>\n\n<|body_start_1|>\n S, I, Z, R = u\n u0 = self.sigma(t) - self.beta(t) * S * Z - self.ds(t) * S\n u1 = self.beta(t) * S * Z - self.rho(t) * I - self.di(t) * I\n u2 = self.rho(t) * I - self.alfa(t) * S * Z\n u3 = self.ds(t) * S + self.di(t) * I + self.alfa(t) * S * Z\n return [u0, u1, u2, u3]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000048", "length_bytes": 3967, "license_type": "no_license", "methods": [{"docstring": "nu, beta: parametere i ODE-systemet S0,I0,R0 = init verdier T: Simulering for t i [0,T]", "name": "__init__", "signature": "def __init__(self, sigma, beta, ds, di, rho, alfa, S0, I0, Z0, R0, T)"}, {"docstring": "Høyresiden(e) i ODE-systemet", "name": "__call__", "signature": "def __call__(self, u, t)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053225", "prompt": "Implement the Python class `ProblemSIZR` described below.\n\nClass description:\nImplement the ProblemSIZR class.\n\nMethod signatures and docstrings:\n- def __init__(self, sigma, beta, ds, di, rho, alfa, S0, I0, Z0, R0, T): nu, beta: parametere i ODE-systemet S0,I0,R0 = init verdier T: Simulering for t i [0,T]\n- def __call__(self, u, t): Høyresiden(e) i ODE-systemet", "prompted_full_text": "Implement the Python class `ProblemSIZR` described below.\n\nClass description:\nImplement the ProblemSIZR class.\n\nMethod signatures and docstrings:\n- def __init__(self, sigma, beta, ds, di, rho, alfa, S0, I0, Z0, R0, T): nu, beta: parametere i ODE-systemet S0,I0,R0 = init verdier T: Simulering for t i [0,T]\n- def __call__(self, u, t): Høyresiden(e) i ODE-systemet\n\n<|skeleton|>\nclass ProblemSIZR:\n\n def __init__(self, sigma, beta, ds, di, rho, alfa, S0, I0, Z0, R0, T):\n \"\"\"nu, beta: parametere i ODE-systemet S0,I0,R0 = init verdier T: Simulering for t i [0,T]\"\"\"\n <|body_0|>\n\n def __call__(self, u, t):\n \"\"\"Høyresiden(e) i ODE-systemet\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(sigma, (float, int)):\n self.sigma = lambda t: sigma\n elif callable(sigma):\n self.sigma = sigma\n if isinstance(beta, (float, int)):\n self.beta = lambda t: beta\n elif callable(beta):\n self.beta = beta\n if isinstance(ds, (float, int)):\n self.ds = lambda t: ds\n elif callable(ds):\n self.ds = ds\n if isinstance(di, (float, int)):\n self.di = lambda t: di\n elif callable(di):\n self.di = di\n if isinstance(rho, (float, int)):\n self.rho = lambda t: rho\n elif callable(rho):\n self.rho = rho\n if isinstance(alfa, (float, int)):\n self.alfa = lambda t: alfa\n elif callable(alfa):\n self.alfa = alfa\n self.S0, self.I0, self.Z0, self.R0, self.T = (S0, I0, Z0, R0, T)\n<|end_body_0|>\n\n<|body_start_1|>\n S, I, Z, R = u\n u0 = self.sigma(t) - self.beta(t) * S * Z - self.ds(t) * S\n u1 = self.beta(t) * S * Z - self.rho(t) * I - self.di(t) * I\n u2 = self.rho(t) * I - self.alfa(t) * S * Z\n u3 = self.ds(t) * S + self.di(t) * I + self.alfa(t) * S * Z\n return [u0, u1, u2, u3]\n<|end_body_1|>\n", "revision_id": "c8d97c2903078471f8e419f88cc8488d9b8fc7da", "skeleton": "<|skeleton|>\nclass ProblemSIZR:\n\n def __init__(self, sigma, beta, ds, di, rho, alfa, S0, I0, Z0, R0, T):\n \"\"\"nu, beta: parametere i ODE-systemet S0,I0,R0 = init verdier T: Simulering for t i [0,T]\"\"\"\n <|body_0|>\n\n def __call__(self, u, t):\n \"\"\"Høyresiden(e) i ODE-systemet\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProblemSIZR:\n def __init__(self, sigma, beta, ds, di, rho, alfa, S0, I0, Z0, R0, T):\n \"\"\"nu, beta: parametere i ODE-systemet S0,I0,R0 = init verdier T: Simulering for t i [0,T]\"\"\"\n if isinstance(sigma, (float, int)):\n self.sigma = lambda t: sigma\n elif callable(sigma):\n self.sigma = sigma\n if isinstance(beta, (float, int)):\n self.beta = lambda t: beta\n elif callable(beta):\n self.beta = beta\n if isinstance(ds, (float, int)):\n self.ds = lambda t: ds\n elif callable(ds):\n self.ds = ds\n if isinstance(di, (float, int)):\n self.di = lambda t: di\n elif callable(di):\n self.di = di\n if isinstance(rho, (float, int)):\n self.rho = lambda t: rho\n elif callable(rho):\n self.rho = rho\n if isinstance(alfa, (float, int)):\n self.alfa = lambda t: alfa\n elif callable(alfa):\n self.alfa = alfa\n self.S0, self.I0, self.Z0, self.R0, self.T = (S0, I0, Z0, R0, T)\n\n def __call__(self, u, t):\n \"\"\"Høyresiden(e) i ODE-systemet\"\"\"\n S, I, Z, R = u\n u0 = self.sigma(t) - self.beta(t) * S * Z - self.ds(t) * S\n u1 = self.beta(t) * S * Z - self.rho(t) * I - self.di(t) * I\n u2 = self.rho(t) * I - self.alfa(t) * S * Z\n u3 = self.ds(t) * S + self.di(t) * I + self.alfa(t) * S * Z\n return [u0, u1, u2, u3]\n", "source": "the_stack_v2_python_sparse", "source_path": "Prosjekt/SIZR.py", "source_repo": "lasse-steinnes/IN1900", "split": "test", "star_events_count": 0} {"blob_id": "df950fdaba2fcdad0fa7885f1da8612154d3fe11", "bodies": ["with patch('beeline.get_beeline') as p:\n p.return_value = None\n\n @awslambda.beeline_wrapper\n def foo(event, context):\n return 1\n self.assertEqual(foo(None, None), 1)\n\n @awslambda.beeline_wrapper()\n def bar(event, context):\n return 1\n self.assertEqual(bar(None, None), 1)", "with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)", "with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.add_context') as m_add_context, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n raise ValueError('something went wrong')\n with self.assertRaises(ValueError):\n handler(m_event, m_context)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context.assert_called_once_with({'app.exception_type': ANY, 'app.exception_string': 'something went wrong', 'app.exception_stacktrace': ANY})", "with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_input=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)", "with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_output=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.not_called_with('app.response', 1)"], "bodies_text": "<|body_start_0|>\n with patch('beeline.get_beeline') as p:\n p.return_value = None\n\n @awslambda.beeline_wrapper\n def foo(event, context):\n return 1\n self.assertEqual(foo(None, None), 1)\n\n @awslambda.beeline_wrapper()\n def bar(event, context):\n return 1\n self.assertEqual(bar(None, None), 1)\n<|end_body_0|>\n\n<|body_start_1|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n<|end_body_1|>\n\n<|body_start_2|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.add_context') as m_add_context, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n raise ValueError('something went wrong')\n with self.assertRaises(ValueError):\n handler(m_event, m_context)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context.assert_called_once_with({'app.exception_type': ANY, 'app.exception_string': 'something went wrong', 'app.exception_stacktrace': ANY})\n<|end_body_2|>\n\n<|body_start_3|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_input=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n<|end_body_3|>\n\n<|body_start_4|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_output=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.not_called_with('app.response', 1)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "TestLambdaWrapper", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestLambdaWrapper:\n\n def test_wrapper_works_no_init(self):\n \"\"\"ensure that the wrapper doesn't break anything if used before beeline.init is called\"\"\"\n <|body_0|>\n\n def test_basic_instrumentation(self):\n \"\"\"ensure basic event fields get instrumented\"\"\"\n <|body_1|>\n\n def test_handle_exceptions(self):\n \"\"\"ensure instrumentation occurs when the handler raises an exception\"\"\"\n <|body_2|>\n\n def test_can_omit_input(self):\n \"\"\"ensure input event field can be omitted\"\"\"\n <|body_3|>\n\n def test_can_omit_output(self):\n \"\"\"ensure output event fields can be omitted\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with patch('beeline.get_beeline') as p:\n p.return_value = None\n\n @awslambda.beeline_wrapper\n def foo(event, context):\n return 1\n self.assertEqual(foo(None, None), 1)\n\n @awslambda.beeline_wrapper()\n def bar(event, context):\n return 1\n self.assertEqual(bar(None, None), 1)\n<|end_body_0|>\n\n<|body_start_1|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n<|end_body_1|>\n\n<|body_start_2|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.add_context') as m_add_context, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n raise ValueError('something went wrong')\n with self.assertRaises(ValueError):\n handler(m_event, m_context)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context.assert_called_once_with({'app.exception_type': ANY, 'app.exception_string': 'something went wrong', 'app.exception_stacktrace': ANY})\n<|end_body_2|>\n\n<|body_start_3|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_input=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n<|end_body_3|>\n\n<|body_start_4|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_output=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.not_called_with('app.response', 1)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000049", "length_bytes": 10594, "license_type": "permissive", "methods": [{"docstring": "ensure that the wrapper doesn't break anything if used before beeline.init is called", "name": "test_wrapper_works_no_init", "signature": "def test_wrapper_works_no_init(self)"}, {"docstring": "ensure basic event fields get instrumented", "name": "test_basic_instrumentation", "signature": "def test_basic_instrumentation(self)"}, {"docstring": "ensure instrumentation occurs when the handler raises an exception", "name": "test_handle_exceptions", "signature": "def test_handle_exceptions(self)"}, {"docstring": "ensure input event field can be omitted", "name": "test_can_omit_input", "signature": "def test_can_omit_input(self)"}, {"docstring": "ensure output event fields can be omitted", "name": "test_can_omit_output", "signature": "def test_can_omit_output(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_039446", "prompt": "Implement the Python class `TestLambdaWrapper` described below.\n\nClass description:\nImplement the TestLambdaWrapper class.\n\nMethod signatures and docstrings:\n- def test_wrapper_works_no_init(self): ensure that the wrapper doesn't break anything if used before beeline.init is called\n- def test_basic_instrumentation(self): ensure basic event fields get instrumented\n- def test_handle_exceptions(self): ensure instrumentation occurs when the handler raises an exception\n- def test_can_omit_input(self): ensure input event field can be omitted\n- def test_can_omit_output(self): ensure output event fields can be omitted", "prompted_full_text": "Implement the Python class `TestLambdaWrapper` described below.\n\nClass description:\nImplement the TestLambdaWrapper class.\n\nMethod signatures and docstrings:\n- def test_wrapper_works_no_init(self): ensure that the wrapper doesn't break anything if used before beeline.init is called\n- def test_basic_instrumentation(self): ensure basic event fields get instrumented\n- def test_handle_exceptions(self): ensure instrumentation occurs when the handler raises an exception\n- def test_can_omit_input(self): ensure input event field can be omitted\n- def test_can_omit_output(self): ensure output event fields can be omitted\n\n<|skeleton|>\nclass TestLambdaWrapper:\n\n def test_wrapper_works_no_init(self):\n \"\"\"ensure that the wrapper doesn't break anything if used before beeline.init is called\"\"\"\n <|body_0|>\n\n def test_basic_instrumentation(self):\n \"\"\"ensure basic event fields get instrumented\"\"\"\n <|body_1|>\n\n def test_handle_exceptions(self):\n \"\"\"ensure instrumentation occurs when the handler raises an exception\"\"\"\n <|body_2|>\n\n def test_can_omit_input(self):\n \"\"\"ensure input event field can be omitted\"\"\"\n <|body_3|>\n\n def test_can_omit_output(self):\n \"\"\"ensure output event fields can be omitted\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with patch('beeline.get_beeline') as p:\n p.return_value = None\n\n @awslambda.beeline_wrapper\n def foo(event, context):\n return 1\n self.assertEqual(foo(None, None), 1)\n\n @awslambda.beeline_wrapper()\n def bar(event, context):\n return 1\n self.assertEqual(bar(None, None), 1)\n<|end_body_0|>\n\n<|body_start_1|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n<|end_body_1|>\n\n<|body_start_2|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.add_context') as m_add_context, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n raise ValueError('something went wrong')\n with self.assertRaises(ValueError):\n handler(m_event, m_context)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context.assert_called_once_with({'app.exception_type': ANY, 'app.exception_string': 'something went wrong', 'app.exception_stacktrace': ANY})\n<|end_body_2|>\n\n<|body_start_3|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_input=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n<|end_body_3|>\n\n<|body_start_4|>\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_output=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.not_called_with('app.response', 1)\n<|end_body_4|>\n", "revision_id": "4e768a01b97bafe13700b32e279c5aeef3387297", "skeleton": "<|skeleton|>\nclass TestLambdaWrapper:\n\n def test_wrapper_works_no_init(self):\n \"\"\"ensure that the wrapper doesn't break anything if used before beeline.init is called\"\"\"\n <|body_0|>\n\n def test_basic_instrumentation(self):\n \"\"\"ensure basic event fields get instrumented\"\"\"\n <|body_1|>\n\n def test_handle_exceptions(self):\n \"\"\"ensure instrumentation occurs when the handler raises an exception\"\"\"\n <|body_2|>\n\n def test_can_omit_input(self):\n \"\"\"ensure input event field can be omitted\"\"\"\n <|body_3|>\n\n def test_can_omit_output(self):\n \"\"\"ensure output event fields can be omitted\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestLambdaWrapper:\n def test_wrapper_works_no_init(self):\n \"\"\"ensure that the wrapper doesn't break anything if used before beeline.init is called\"\"\"\n with patch('beeline.get_beeline') as p:\n p.return_value = None\n\n @awslambda.beeline_wrapper\n def foo(event, context):\n return 1\n self.assertEqual(foo(None, None), 1)\n\n @awslambda.beeline_wrapper()\n def bar(event, context):\n return 1\n self.assertEqual(bar(None, None), 1)\n\n def test_basic_instrumentation(self):\n \"\"\"ensure basic event fields get instrumented\"\"\"\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n\n def test_handle_exceptions(self):\n \"\"\"ensure instrumentation occurs when the handler raises an exception\"\"\"\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.add_context') as m_add_context, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper\n def handler(event, context):\n raise ValueError('something went wrong')\n with self.assertRaises(ValueError):\n handler(m_event, m_context)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context.assert_called_once_with({'app.exception_type': ANY, 'app.exception_string': 'something went wrong', 'app.exception_stacktrace': ANY})\n\n def test_can_omit_input(self):\n \"\"\"ensure input event field can be omitted\"\"\"\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_input=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.assert_called_once_with('app.response', 1)\n\n def test_can_omit_output(self):\n \"\"\"ensure output event fields can be omitted\"\"\"\n with patch('beeline.propagate_and_start_trace') as m_propagate, patch('beeline.add_context_field') as m_add_context_field, patch('beeline.middleware.awslambda.beeline._GBL'), patch('beeline.middleware.awslambda.COLD_START') as m_cold_start:\n m_event = Mock()\n m_context = Mock(function_name='fn', function_version='1.1.1', aws_request_id='12345')\n\n @awslambda.beeline_wrapper(record_output=False)\n def handler(event, context):\n return 1\n self.assertEqual(handler(m_event, m_context), 1)\n m_propagate.assert_called_once_with({'app.function_name': 'fn', 'app.function_version': '1.1.1', 'app.request_id': '12345', 'app.event': ANY, 'meta.cold_start': ANY, 'name': 'handler'}, ANY)\n m_add_context_field.not_called_with('app.response', 1)\n", "source": "the_stack_v2_python_sparse", "source_path": "beeline/middleware/awslambda/test_awslambda.py", "source_repo": "honeycombio/beeline-python", "split": "test", "star_events_count": 36} {"blob_id": "47f93c2c4d38c436afc396df0bc4bcc7aaabd30f", "bodies": ["self._bpm_records = bpm_records\nself._genotypes = genotypes\nself._logger = logger", "alleles = set()\nfor record in self._bpm_records:\n alleles.update(record.plus_strand_alleles)\nreturn list(combinations_with_replacement(alleles, 2))", "record_int_genotype = self._genotypes[record.index_num]\nif record_int_genotype == 0:\n return False\nplus_strand_alleles = record.plus_strand_alleles\nrecord_plus_genotype = convert_ab_genotype_to_nucleotide(record_int_genotype, plus_strand_alleles)\nfor allele in record_plus_genotype:\n consistent_alleles = []\n consistent_alleles.append(allele)\n if record.assay_type == 0:\n consistent_alleles.append(CHANNEL_MAP[allele])\n if not any([consistent_allele in genotype for consistent_allele in consistent_alleles]):\n return True\n absent_alleles = []\n if record_int_genotype == 1 or record_int_genotype == 3:\n absent_allele = plus_strand_alleles[0] if record_int_genotype == 3 else plus_strand_alleles[1]\n absent_alleles.append(absent_allele)\n if record.assay_type == 0:\n absent_alleles.append(CHANNEL_MAP[absent_allele])\n if any([absent_allele in genotype for absent_allele in absent_alleles]):\n return True\nreturn False", "idx2inconsistent = [False] * len(possible_genotypes)\nfor idx in range(len(possible_genotypes)):\n for record in self._bpm_records:\n if self._record_inconsistent_with_genotype(record, possible_genotypes[idx]):\n idx2inconsistent[idx] = True\n break\nreturn [genotype for genotype, is_inconsistent in zip(possible_genotypes, idx2inconsistent) if not is_inconsistent]", "possible_genotypes = self._generate_possible_genotypes()\nallowable_genotypes = self._filter_inconsistent_genotypes(possible_genotypes)\nif len(allowable_genotypes) == 1:\n return allowable_genotypes[0]\nreturn ('-', '-')", "record_names = []\nfor record in self._bpm_records:\n record_names.append(record.name)\nreturn ','.join(sorted(record_names))"], "bodies_text": "<|body_start_0|>\n self._bpm_records = bpm_records\n self._genotypes = genotypes\n self._logger = logger\n<|end_body_0|>\n\n<|body_start_1|>\n alleles = set()\n for record in self._bpm_records:\n alleles.update(record.plus_strand_alleles)\n return list(combinations_with_replacement(alleles, 2))\n<|end_body_1|>\n\n<|body_start_2|>\n record_int_genotype = self._genotypes[record.index_num]\n if record_int_genotype == 0:\n return False\n plus_strand_alleles = record.plus_strand_alleles\n record_plus_genotype = convert_ab_genotype_to_nucleotide(record_int_genotype, plus_strand_alleles)\n for allele in record_plus_genotype:\n consistent_alleles = []\n consistent_alleles.append(allele)\n if record.assay_type == 0:\n consistent_alleles.append(CHANNEL_MAP[allele])\n if not any([consistent_allele in genotype for consistent_allele in consistent_alleles]):\n return True\n absent_alleles = []\n if record_int_genotype == 1 or record_int_genotype == 3:\n absent_allele = plus_strand_alleles[0] if record_int_genotype == 3 else plus_strand_alleles[1]\n absent_alleles.append(absent_allele)\n if record.assay_type == 0:\n absent_alleles.append(CHANNEL_MAP[absent_allele])\n if any([absent_allele in genotype for absent_allele in absent_alleles]):\n return True\n return False\n<|end_body_2|>\n\n<|body_start_3|>\n idx2inconsistent = [False] * len(possible_genotypes)\n for idx in range(len(possible_genotypes)):\n for record in self._bpm_records:\n if self._record_inconsistent_with_genotype(record, possible_genotypes[idx]):\n idx2inconsistent[idx] = True\n break\n return [genotype for genotype, is_inconsistent in zip(possible_genotypes, idx2inconsistent) if not is_inconsistent]\n<|end_body_3|>\n\n<|body_start_4|>\n possible_genotypes = self._generate_possible_genotypes()\n allowable_genotypes = self._filter_inconsistent_genotypes(possible_genotypes)\n if len(allowable_genotypes) == 1:\n return allowable_genotypes[0]\n return ('-', '-')\n<|end_body_4|>\n\n<|body_start_5|>\n record_names = []\n for record in self._bpm_records:\n record_names.append(record.name)\n return ','.join(sorted(record_names))\n<|end_body_5|>\n", "class_docstring": "Class to take in a group of BPM records and output a combined genotype", "class_name": "RecordCombiner", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecordCombiner:\n \"\"\"Class to take in a group of BPM records and output a combined genotype\"\"\"\n\n def __init__(self, bpm_records, genotypes, logger):\n \"\"\"Create new RecordCombiner Args: bpm_records (list(BPMRecord)): Group of BPM records for a single site (typically just one) genotypes (list(genotypes)): List of all genotypes in GTC file as integers Returns: RecordCombiner\"\"\"\n <|body_0|>\n\n def _generate_possible_genotypes(self):\n \"\"\"From the alleles in the BPM records, enumerate all possible genotypes at this site Args: None Returns: list(list(string)) - A list of lists of length 2. Each inner list represents a possible genotype at this site in terms of pair of nucleotide alleles on the plus strand\"\"\"\n <|body_1|>\n\n def _record_inconsistent_with_genotype(self, record, genotype):\n \"\"\"Check if a particular BPM record is inconsitent with a given genotype. Genotype should be tuple of alleles on plus strand. Args: record (BPMRecord) genotype (tuple(string)) : Tuple of length 2 where each element is nucleotide allele on plus strand Returns: bool\"\"\"\n <|body_2|>\n\n def _filter_inconsistent_genotypes(self, possible_genotypes):\n \"\"\"Filter the list of possible genotypes to remove those that are inconsitent with any BPM record in this group Args possible_genotypes (list(list(string)) - List of possible genotypes. Each possible genotype is a list of length 2 where each string is a nucleotide on the plus strand Returns list(list(string)) - List of genotypes consitent with assay data. Each remaining genotype is a list of length 2 where each string is a nucleotide on the plus strand\"\"\"\n <|body_3|>\n\n def combine_genotypes(self):\n \"\"\"Generate the combined genotype from all assays at this site Args: None Returns: (string, string): The combined genotype (on the plus strand) at this site (e.g., (\"A\", \"C\") ) No call is (\"-\", \"-\")\"\"\"\n <|body_4|>\n\n def combine_names(self):\n \"\"\"Generate the combined name for thi sgroup of records Args: None Returns: string: The combined names\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._bpm_records = bpm_records\n self._genotypes = genotypes\n self._logger = logger\n<|end_body_0|>\n\n<|body_start_1|>\n alleles = set()\n for record in self._bpm_records:\n alleles.update(record.plus_strand_alleles)\n return list(combinations_with_replacement(alleles, 2))\n<|end_body_1|>\n\n<|body_start_2|>\n record_int_genotype = self._genotypes[record.index_num]\n if record_int_genotype == 0:\n return False\n plus_strand_alleles = record.plus_strand_alleles\n record_plus_genotype = convert_ab_genotype_to_nucleotide(record_int_genotype, plus_strand_alleles)\n for allele in record_plus_genotype:\n consistent_alleles = []\n consistent_alleles.append(allele)\n if record.assay_type == 0:\n consistent_alleles.append(CHANNEL_MAP[allele])\n if not any([consistent_allele in genotype for consistent_allele in consistent_alleles]):\n return True\n absent_alleles = []\n if record_int_genotype == 1 or record_int_genotype == 3:\n absent_allele = plus_strand_alleles[0] if record_int_genotype == 3 else plus_strand_alleles[1]\n absent_alleles.append(absent_allele)\n if record.assay_type == 0:\n absent_alleles.append(CHANNEL_MAP[absent_allele])\n if any([absent_allele in genotype for absent_allele in absent_alleles]):\n return True\n return False\n<|end_body_2|>\n\n<|body_start_3|>\n idx2inconsistent = [False] * len(possible_genotypes)\n for idx in range(len(possible_genotypes)):\n for record in self._bpm_records:\n if self._record_inconsistent_with_genotype(record, possible_genotypes[idx]):\n idx2inconsistent[idx] = True\n break\n return [genotype for genotype, is_inconsistent in zip(possible_genotypes, idx2inconsistent) if not is_inconsistent]\n<|end_body_3|>\n\n<|body_start_4|>\n possible_genotypes = self._generate_possible_genotypes()\n allowable_genotypes = self._filter_inconsistent_genotypes(possible_genotypes)\n if len(allowable_genotypes) == 1:\n return allowable_genotypes[0]\n return ('-', '-')\n<|end_body_4|>\n\n<|body_start_5|>\n record_names = []\n for record in self._bpm_records:\n record_names.append(record.name)\n return ','.join(sorted(record_names))\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000050", "length_bytes": 12869, "license_type": "permissive", "methods": [{"docstring": "Create new RecordCombiner Args: bpm_records (list(BPMRecord)): Group of BPM records for a single site (typically just one) genotypes (list(genotypes)): List of all genotypes in GTC file as integers Returns: RecordCombiner", "name": "__init__", "signature": "def __init__(self, bpm_records, genotypes, logger)"}, {"docstring": "From the alleles in the BPM records, enumerate all possible genotypes at this site Args: None Returns: list(list(string)) - A list of lists of length 2. Each inner list represents a possible genotype at this site in terms of pair of nucleotide alleles on the plus strand", "name": "_generate_possible_genotypes", "signature": "def _generate_possible_genotypes(self)"}, {"docstring": "Check if a particular BPM record is inconsitent with a given genotype. Genotype should be tuple of alleles on plus strand. Args: record (BPMRecord) genotype (tuple(string)) : Tuple of length 2 where each element is nucleotide allele on plus strand Returns: bool", "name": "_record_inconsistent_with_genotype", "signature": "def _record_inconsistent_with_genotype(self, record, genotype)"}, {"docstring": "Filter the list of possible genotypes to remove those that are inconsitent with any BPM record in this group Args possible_genotypes (list(list(string)) - List of possible genotypes. Each possible genotype is a list of length 2 where each string is a nucleotide on the plus strand Returns list(list(string)) - List of genotypes consitent with assay data. Each remaining genotype is a list of length 2 where each string is a nucleotide on the plus strand", "name": "_filter_inconsistent_genotypes", "signature": "def _filter_inconsistent_genotypes(self, possible_genotypes)"}, {"docstring": "Generate the combined genotype from all assays at this site Args: None Returns: (string, string): The combined genotype (on the plus strand) at this site (e.g., (\"A\", \"C\") ) No call is (\"-\", \"-\")", "name": "combine_genotypes", "signature": "def combine_genotypes(self)"}, {"docstring": "Generate the combined name for thi sgroup of records Args: None Returns: string: The combined names", "name": "combine_names", "signature": "def combine_names(self)"}], "n_methods": 6, "prompt": "Implement the Python class `RecordCombiner` described below.\n\nClass description:\nClass to take in a group of BPM records and output a combined genotype\n\nMethod signatures and docstrings:\n- def __init__(self, bpm_records, genotypes, logger): Create new RecordCombiner Args: bpm_records (list(BPMRecord)): Group of BPM records for a single site (typically just one) genotypes (list(genotypes)): List of all genotypes in GTC file as integers Returns: RecordCombiner\n- def _generate_possible_genotypes(self): From the alleles in the BPM records, enumerate all possible genotypes at this site Args: None Returns: list(list(string)) - A list of lists of length 2. Each inner list represents a possible genotype at this site in terms of pair of nucleotide alleles on the plus strand\n- def _record_inconsistent_with_genotype(self, record, genotype): Check if a particular BPM record is inconsitent with a given genotype. Genotype should be tuple of alleles on plus strand. Args: record (BPMRecord) genotype (tuple(string)) : Tuple of length 2 where each element is nucleotide allele on plus strand Returns: bool\n- def _filter_inconsistent_genotypes(self, possible_genotypes): Filter the list of possible genotypes to remove those that are inconsitent with any BPM record in this group Args possible_genotypes (list(list(string)) - List of possible genotypes. Each possible genotype is a list of length 2 where each string is a nucleotide on the plus strand Returns list(list(string)) - List of genotypes consitent with assay data. Each remaining genotype is a list of length 2 where each string is a nucleotide on the plus strand\n- def combine_genotypes(self): Generate the combined genotype from all assays at this site Args: None Returns: (string, string): The combined genotype (on the plus strand) at this site (e.g., (\"A\", \"C\") ) No call is (\"-\", \"-\")\n- def combine_names(self): Generate the combined name for thi sgroup of records Args: None Returns: string: The combined names", "prompted_full_text": "Implement the Python class `RecordCombiner` described below.\n\nClass description:\nClass to take in a group of BPM records and output a combined genotype\n\nMethod signatures and docstrings:\n- def __init__(self, bpm_records, genotypes, logger): Create new RecordCombiner Args: bpm_records (list(BPMRecord)): Group of BPM records for a single site (typically just one) genotypes (list(genotypes)): List of all genotypes in GTC file as integers Returns: RecordCombiner\n- def _generate_possible_genotypes(self): From the alleles in the BPM records, enumerate all possible genotypes at this site Args: None Returns: list(list(string)) - A list of lists of length 2. Each inner list represents a possible genotype at this site in terms of pair of nucleotide alleles on the plus strand\n- def _record_inconsistent_with_genotype(self, record, genotype): Check if a particular BPM record is inconsitent with a given genotype. Genotype should be tuple of alleles on plus strand. Args: record (BPMRecord) genotype (tuple(string)) : Tuple of length 2 where each element is nucleotide allele on plus strand Returns: bool\n- def _filter_inconsistent_genotypes(self, possible_genotypes): Filter the list of possible genotypes to remove those that are inconsitent with any BPM record in this group Args possible_genotypes (list(list(string)) - List of possible genotypes. Each possible genotype is a list of length 2 where each string is a nucleotide on the plus strand Returns list(list(string)) - List of genotypes consitent with assay data. Each remaining genotype is a list of length 2 where each string is a nucleotide on the plus strand\n- def combine_genotypes(self): Generate the combined genotype from all assays at this site Args: None Returns: (string, string): The combined genotype (on the plus strand) at this site (e.g., (\"A\", \"C\") ) No call is (\"-\", \"-\")\n- def combine_names(self): Generate the combined name for thi sgroup of records Args: None Returns: string: The combined names\n\n<|skeleton|>\nclass RecordCombiner:\n \"\"\"Class to take in a group of BPM records and output a combined genotype\"\"\"\n\n def __init__(self, bpm_records, genotypes, logger):\n \"\"\"Create new RecordCombiner Args: bpm_records (list(BPMRecord)): Group of BPM records for a single site (typically just one) genotypes (list(genotypes)): List of all genotypes in GTC file as integers Returns: RecordCombiner\"\"\"\n <|body_0|>\n\n def _generate_possible_genotypes(self):\n \"\"\"From the alleles in the BPM records, enumerate all possible genotypes at this site Args: None Returns: list(list(string)) - A list of lists of length 2. Each inner list represents a possible genotype at this site in terms of pair of nucleotide alleles on the plus strand\"\"\"\n <|body_1|>\n\n def _record_inconsistent_with_genotype(self, record, genotype):\n \"\"\"Check if a particular BPM record is inconsitent with a given genotype. Genotype should be tuple of alleles on plus strand. Args: record (BPMRecord) genotype (tuple(string)) : Tuple of length 2 where each element is nucleotide allele on plus strand Returns: bool\"\"\"\n <|body_2|>\n\n def _filter_inconsistent_genotypes(self, possible_genotypes):\n \"\"\"Filter the list of possible genotypes to remove those that are inconsitent with any BPM record in this group Args possible_genotypes (list(list(string)) - List of possible genotypes. Each possible genotype is a list of length 2 where each string is a nucleotide on the plus strand Returns list(list(string)) - List of genotypes consitent with assay data. Each remaining genotype is a list of length 2 where each string is a nucleotide on the plus strand\"\"\"\n <|body_3|>\n\n def combine_genotypes(self):\n \"\"\"Generate the combined genotype from all assays at this site Args: None Returns: (string, string): The combined genotype (on the plus strand) at this site (e.g., (\"A\", \"C\") ) No call is (\"-\", \"-\")\"\"\"\n <|body_4|>\n\n def combine_names(self):\n \"\"\"Generate the combined name for thi sgroup of records Args: None Returns: string: The combined names\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._bpm_records = bpm_records\n self._genotypes = genotypes\n self._logger = logger\n<|end_body_0|>\n\n<|body_start_1|>\n alleles = set()\n for record in self._bpm_records:\n alleles.update(record.plus_strand_alleles)\n return list(combinations_with_replacement(alleles, 2))\n<|end_body_1|>\n\n<|body_start_2|>\n record_int_genotype = self._genotypes[record.index_num]\n if record_int_genotype == 0:\n return False\n plus_strand_alleles = record.plus_strand_alleles\n record_plus_genotype = convert_ab_genotype_to_nucleotide(record_int_genotype, plus_strand_alleles)\n for allele in record_plus_genotype:\n consistent_alleles = []\n consistent_alleles.append(allele)\n if record.assay_type == 0:\n consistent_alleles.append(CHANNEL_MAP[allele])\n if not any([consistent_allele in genotype for consistent_allele in consistent_alleles]):\n return True\n absent_alleles = []\n if record_int_genotype == 1 or record_int_genotype == 3:\n absent_allele = plus_strand_alleles[0] if record_int_genotype == 3 else plus_strand_alleles[1]\n absent_alleles.append(absent_allele)\n if record.assay_type == 0:\n absent_alleles.append(CHANNEL_MAP[absent_allele])\n if any([absent_allele in genotype for absent_allele in absent_alleles]):\n return True\n return False\n<|end_body_2|>\n\n<|body_start_3|>\n idx2inconsistent = [False] * len(possible_genotypes)\n for idx in range(len(possible_genotypes)):\n for record in self._bpm_records:\n if self._record_inconsistent_with_genotype(record, possible_genotypes[idx]):\n idx2inconsistent[idx] = True\n break\n return [genotype for genotype, is_inconsistent in zip(possible_genotypes, idx2inconsistent) if not is_inconsistent]\n<|end_body_3|>\n\n<|body_start_4|>\n possible_genotypes = self._generate_possible_genotypes()\n allowable_genotypes = self._filter_inconsistent_genotypes(possible_genotypes)\n if len(allowable_genotypes) == 1:\n return allowable_genotypes[0]\n return ('-', '-')\n<|end_body_4|>\n\n<|body_start_5|>\n record_names = []\n for record in self._bpm_records:\n record_names.append(record.name)\n return ','.join(sorted(record_names))\n<|end_body_5|>\n", "revision_id": "cc3f2c0e359e4f5d7626e56b4c1c3b8b1f2cab4f", "skeleton": "<|skeleton|>\nclass RecordCombiner:\n \"\"\"Class to take in a group of BPM records and output a combined genotype\"\"\"\n\n def __init__(self, bpm_records, genotypes, logger):\n \"\"\"Create new RecordCombiner Args: bpm_records (list(BPMRecord)): Group of BPM records for a single site (typically just one) genotypes (list(genotypes)): List of all genotypes in GTC file as integers Returns: RecordCombiner\"\"\"\n <|body_0|>\n\n def _generate_possible_genotypes(self):\n \"\"\"From the alleles in the BPM records, enumerate all possible genotypes at this site Args: None Returns: list(list(string)) - A list of lists of length 2. Each inner list represents a possible genotype at this site in terms of pair of nucleotide alleles on the plus strand\"\"\"\n <|body_1|>\n\n def _record_inconsistent_with_genotype(self, record, genotype):\n \"\"\"Check if a particular BPM record is inconsitent with a given genotype. Genotype should be tuple of alleles on plus strand. Args: record (BPMRecord) genotype (tuple(string)) : Tuple of length 2 where each element is nucleotide allele on plus strand Returns: bool\"\"\"\n <|body_2|>\n\n def _filter_inconsistent_genotypes(self, possible_genotypes):\n \"\"\"Filter the list of possible genotypes to remove those that are inconsitent with any BPM record in this group Args possible_genotypes (list(list(string)) - List of possible genotypes. Each possible genotype is a list of length 2 where each string is a nucleotide on the plus strand Returns list(list(string)) - List of genotypes consitent with assay data. Each remaining genotype is a list of length 2 where each string is a nucleotide on the plus strand\"\"\"\n <|body_3|>\n\n def combine_genotypes(self):\n \"\"\"Generate the combined genotype from all assays at this site Args: None Returns: (string, string): The combined genotype (on the plus strand) at this site (e.g., (\"A\", \"C\") ) No call is (\"-\", \"-\")\"\"\"\n <|body_4|>\n\n def combine_names(self):\n \"\"\"Generate the combined name for thi sgroup of records Args: None Returns: string: The combined names\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RecordCombiner:\n \"\"\"Class to take in a group of BPM records and output a combined genotype\"\"\"\n\n def __init__(self, bpm_records, genotypes, logger):\n \"\"\"Create new RecordCombiner Args: bpm_records (list(BPMRecord)): Group of BPM records for a single site (typically just one) genotypes (list(genotypes)): List of all genotypes in GTC file as integers Returns: RecordCombiner\"\"\"\n self._bpm_records = bpm_records\n self._genotypes = genotypes\n self._logger = logger\n\n def _generate_possible_genotypes(self):\n \"\"\"From the alleles in the BPM records, enumerate all possible genotypes at this site Args: None Returns: list(list(string)) - A list of lists of length 2. Each inner list represents a possible genotype at this site in terms of pair of nucleotide alleles on the plus strand\"\"\"\n alleles = set()\n for record in self._bpm_records:\n alleles.update(record.plus_strand_alleles)\n return list(combinations_with_replacement(alleles, 2))\n\n def _record_inconsistent_with_genotype(self, record, genotype):\n \"\"\"Check if a particular BPM record is inconsitent with a given genotype. Genotype should be tuple of alleles on plus strand. Args: record (BPMRecord) genotype (tuple(string)) : Tuple of length 2 where each element is nucleotide allele on plus strand Returns: bool\"\"\"\n record_int_genotype = self._genotypes[record.index_num]\n if record_int_genotype == 0:\n return False\n plus_strand_alleles = record.plus_strand_alleles\n record_plus_genotype = convert_ab_genotype_to_nucleotide(record_int_genotype, plus_strand_alleles)\n for allele in record_plus_genotype:\n consistent_alleles = []\n consistent_alleles.append(allele)\n if record.assay_type == 0:\n consistent_alleles.append(CHANNEL_MAP[allele])\n if not any([consistent_allele in genotype for consistent_allele in consistent_alleles]):\n return True\n absent_alleles = []\n if record_int_genotype == 1 or record_int_genotype == 3:\n absent_allele = plus_strand_alleles[0] if record_int_genotype == 3 else plus_strand_alleles[1]\n absent_alleles.append(absent_allele)\n if record.assay_type == 0:\n absent_alleles.append(CHANNEL_MAP[absent_allele])\n if any([absent_allele in genotype for absent_allele in absent_alleles]):\n return True\n return False\n\n def _filter_inconsistent_genotypes(self, possible_genotypes):\n \"\"\"Filter the list of possible genotypes to remove those that are inconsitent with any BPM record in this group Args possible_genotypes (list(list(string)) - List of possible genotypes. Each possible genotype is a list of length 2 where each string is a nucleotide on the plus strand Returns list(list(string)) - List of genotypes consitent with assay data. Each remaining genotype is a list of length 2 where each string is a nucleotide on the plus strand\"\"\"\n idx2inconsistent = [False] * len(possible_genotypes)\n for idx in range(len(possible_genotypes)):\n for record in self._bpm_records:\n if self._record_inconsistent_with_genotype(record, possible_genotypes[idx]):\n idx2inconsistent[idx] = True\n break\n return [genotype for genotype, is_inconsistent in zip(possible_genotypes, idx2inconsistent) if not is_inconsistent]\n\n def combine_genotypes(self):\n \"\"\"Generate the combined genotype from all assays at this site Args: None Returns: (string, string): The combined genotype (on the plus strand) at this site (e.g., (\"A\", \"C\") ) No call is (\"-\", \"-\")\"\"\"\n possible_genotypes = self._generate_possible_genotypes()\n allowable_genotypes = self._filter_inconsistent_genotypes(possible_genotypes)\n if len(allowable_genotypes) == 1:\n return allowable_genotypes[0]\n return ('-', '-')\n\n def combine_names(self):\n \"\"\"Generate the combined name for thi sgroup of records Args: None Returns: string: The combined names\"\"\"\n record_names = []\n for record in self._bpm_records:\n record_names.append(record.name)\n return ','.join(sorted(record_names))\n", "source": "the_stack_v2_python_sparse", "source_path": "GenotypeFormat.py", "source_repo": "Illumina/GTCtoVCF", "split": "test", "star_events_count": 42} {"blob_id": "8b3a28c21606bd59a53cb7dedeec30a9ec736c36", "bodies": ["self.update_packages(dev=True)\nself.update_webpack_mix()\nself.update_css()\nself.remove_node_modules()", "make_full_directory(resources_path('css'))\nshutil.copyfile(self.get_template_path('_variables.scss'), resources_path('css/_variables.scss'))\nshutil.copyfile(self.get_template_path('app.scss'), resources_path('css/app.scss'))"], "bodies_text": "<|body_start_0|>\n self.update_packages(dev=True)\n self.update_webpack_mix()\n self.update_css()\n self.remove_node_modules()\n<|end_body_0|>\n\n<|body_start_1|>\n make_full_directory(resources_path('css'))\n shutil.copyfile(self.get_template_path('_variables.scss'), resources_path('css/_variables.scss'))\n shutil.copyfile(self.get_template_path('app.scss'), resources_path('css/app.scss'))\n<|end_body_1|>\n", "class_docstring": "Configure the front-end scaffolding for the application to use Bootstrap", "class_name": "Bootstrap", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Bootstrap:\n \"\"\"Configure the front-end scaffolding for the application to use Bootstrap\"\"\"\n\n def install(self):\n \"\"\"Install the preset\"\"\"\n <|body_0|>\n\n def update_css(self):\n \"\"\"Create/Override an app.scss file configured for the preset.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.update_packages(dev=True)\n self.update_webpack_mix()\n self.update_css()\n self.remove_node_modules()\n<|end_body_0|>\n\n<|body_start_1|>\n make_full_directory(resources_path('css'))\n shutil.copyfile(self.get_template_path('_variables.scss'), resources_path('css/_variables.scss'))\n shutil.copyfile(self.get_template_path('app.scss'), resources_path('css/app.scss'))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000051", "length_bytes": 1056, "license_type": "permissive", "methods": [{"docstring": "Install the preset", "name": "install", "signature": "def install(self)"}, {"docstring": "Create/Override an app.scss file configured for the preset.", "name": "update_css", "signature": "def update_css(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_022810", "prompt": "Implement the Python class `Bootstrap` described below.\n\nClass description:\nConfigure the front-end scaffolding for the application to use Bootstrap\n\nMethod signatures and docstrings:\n- def install(self): Install the preset\n- def update_css(self): Create/Override an app.scss file configured for the preset.", "prompted_full_text": "Implement the Python class `Bootstrap` described below.\n\nClass description:\nConfigure the front-end scaffolding for the application to use Bootstrap\n\nMethod signatures and docstrings:\n- def install(self): Install the preset\n- def update_css(self): Create/Override an app.scss file configured for the preset.\n\n<|skeleton|>\nclass Bootstrap:\n \"\"\"Configure the front-end scaffolding for the application to use Bootstrap\"\"\"\n\n def install(self):\n \"\"\"Install the preset\"\"\"\n <|body_0|>\n\n def update_css(self):\n \"\"\"Create/Override an app.scss file configured for the preset.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.update_packages(dev=True)\n self.update_webpack_mix()\n self.update_css()\n self.remove_node_modules()\n<|end_body_0|>\n\n<|body_start_1|>\n make_full_directory(resources_path('css'))\n shutil.copyfile(self.get_template_path('_variables.scss'), resources_path('css/_variables.scss'))\n shutil.copyfile(self.get_template_path('app.scss'), resources_path('css/app.scss'))\n<|end_body_1|>\n", "revision_id": "e8e55e5fdced9f28cc8acb1577457a490e5b4b74", "skeleton": "<|skeleton|>\nclass Bootstrap:\n \"\"\"Configure the front-end scaffolding for the application to use Bootstrap\"\"\"\n\n def install(self):\n \"\"\"Install the preset\"\"\"\n <|body_0|>\n\n def update_css(self):\n \"\"\"Create/Override an app.scss file configured for the preset.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Bootstrap:\n \"\"\"Configure the front-end scaffolding for the application to use Bootstrap\"\"\"\n\n def install(self):\n \"\"\"Install the preset\"\"\"\n self.update_packages(dev=True)\n self.update_webpack_mix()\n self.update_css()\n self.remove_node_modules()\n\n def update_css(self):\n \"\"\"Create/Override an app.scss file configured for the preset.\"\"\"\n make_full_directory(resources_path('css'))\n shutil.copyfile(self.get_template_path('_variables.scss'), resources_path('css/_variables.scss'))\n shutil.copyfile(self.get_template_path('app.scss'), resources_path('css/app.scss'))\n", "source": "the_stack_v2_python_sparse", "source_path": "src/masonite/presets/Bootstrap.py", "source_repo": "MasoniteFramework/masonite", "split": "test", "star_events_count": 2173} {"blob_id": "82e9c8562484bee33918ba3d2b6f01b622fba4bb", "bodies": ["request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\nif request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\nif request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\nrequest_id.action_check('OK')\nreturn http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})", "request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\nif request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\nif request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\nrequest_id.action_check('REJ')\nreturn http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})", "request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\nif request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\nif request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\nrequest_id.action_check('OK')\nreturn http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})", "request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\nif request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\nif request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\nrequest_id.action_check('REJ')\nreturn http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})"], "bodies_text": "<|body_start_0|>\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n<|end_body_0|>\n\n<|body_start_1|>\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n<|end_body_1|>\n\n<|body_start_2|>\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n<|end_body_2|>\n\n<|body_start_3|>\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n<|end_body_3|>\n", "class_docstring": "", "class_name": "HRLeaveRequestManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HRLeaveRequestManager:\n\n def holidays_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_0|>\n\n def holidays_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_1|>\n\n def leave_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_2|>\n\n def leave_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n<|end_body_0|>\n\n<|body_start_1|>\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n<|end_body_1|>\n\n<|body_start_2|>\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n<|end_body_2|>\n\n<|body_start_3|>\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000052", "length_bytes": 4327, "license_type": "no_license", "methods": [{"docstring": "Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`", "name": "holidays_accept_action", "signature": "def holidays_accept_action(self, **kw)"}, {"docstring": "Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`", "name": "holidays_decline_action", "signature": "def holidays_decline_action(self, **kw)"}, {"docstring": "Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`", "name": "leave_accept_action", "signature": "def leave_accept_action(self, **kw)"}, {"docstring": "Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`", "name": "leave_decline_action", "signature": "def leave_decline_action(self, **kw)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_049695", "prompt": "Implement the Python class `HRLeaveRequestManager` described below.\n\nClass description:\nImplement the HRLeaveRequestManager class.\n\nMethod signatures and docstrings:\n- def holidays_accept_action(self, **kw): Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\n- def holidays_decline_action(self, **kw): Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\n- def leave_accept_action(self, **kw): Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\n- def leave_decline_action(self, **kw): Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`", "prompted_full_text": "Implement the Python class `HRLeaveRequestManager` described below.\n\nClass description:\nImplement the HRLeaveRequestManager class.\n\nMethod signatures and docstrings:\n- def holidays_accept_action(self, **kw): Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\n- def holidays_decline_action(self, **kw): Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\n- def leave_accept_action(self, **kw): Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\n- def leave_decline_action(self, **kw): Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\n\n<|skeleton|>\nclass HRLeaveRequestManager:\n\n def holidays_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_0|>\n\n def holidays_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_1|>\n\n def leave_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_2|>\n\n def leave_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n<|end_body_0|>\n\n<|body_start_1|>\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n<|end_body_1|>\n\n<|body_start_2|>\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n<|end_body_2|>\n\n<|body_start_3|>\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n<|end_body_3|>\n", "revision_id": "778dcd6e4247949daae3a40e64025b3aaf014373", "skeleton": "<|skeleton|>\nclass HRLeaveRequestManager:\n\n def holidays_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_0|>\n\n def holidays_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_1|>\n\n def leave_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_2|>\n\n def leave_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HRLeaveRequestManager:\n def holidays_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n\n def holidays_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n request_id = http.request.env['hr.holidays.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n\n def leave_accept_action(self, **kw):\n \"\"\"Accept holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('OK')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud aprobada con éxito', 'type': 'ok'})\n\n def leave_decline_action(self, **kw):\n \"\"\"Decline holidas request. @returns: :class:`werkzeug.wrappers.Response`\"\"\"\n request_id = http.request.env['hr.leave.request'].sudo().browse(int(kw['id']))\n if request_id.sudo().access_token != kw['token']:\n return http.request.render('hr_paysheet.message_template', {'message': 'Error de suplantación', 'type': 'error'})\n if request_id.sudo().state in ('OK', 'REJ'):\n return http.request.render('hr_paysheet.message_template', {'message': 'La solicitud ya ha sido procesada', 'type': 'error'})\n request_id.action_check('REJ')\n return http.request.render('hr_paysheet.message_template', {'message': 'Solicitud declinada con éxito', 'type': 'ok'})\n", "source": "the_stack_v2_python_sparse", "source_path": "hr_paysheet/controllers/hr_requests.py", "source_repo": "cardona18/ifaco", "split": "test", "star_events_count": 0} {"blob_id": "01c85ffb40f5a48c6033f1ca80304aec3963e86c", "bodies": ["year_dao = YearDAO()\ntry:\n year = year_dao.find_by_slug(year_slug)\n public_galleries = list(filter(lambda gallery: not gallery.private, year.galleries))\n return ({'year': year_dao.serialize(year_slug), 'public_galleries': [gallery.slug for gallery in public_galleries]}, 200)\nexcept NoResultFound:\n return ({'msg': 'year not found'}, 404)", "year_dao = YearDAO()\nif current_user.admin:\n try:\n year_dao.delete_detaching_galleries(year_slug)\n return ({'msg': 'year deleted'}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\nreturn ({'msg': 'not admin'}, 403)"], "bodies_text": "<|body_start_0|>\n year_dao = YearDAO()\n try:\n year = year_dao.find_by_slug(year_slug)\n public_galleries = list(filter(lambda gallery: not gallery.private, year.galleries))\n return ({'year': year_dao.serialize(year_slug), 'public_galleries': [gallery.slug for gallery in public_galleries]}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n year_dao = YearDAO()\n if current_user.admin:\n try:\n year_dao.delete_detaching_galleries(year_slug)\n return ({'msg': 'year deleted'}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n return ({'msg': 'not admin'}, 403)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Year", "detected_licenses": ["LicenseRef-scancode-public-domain", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Year:\n\n def get(self, year_slug):\n \"\"\"Get the list of public galleries of a given year\"\"\"\n <|body_0|>\n\n def delete(self, year_slug):\n \"\"\"Delete a given year\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n year_dao = YearDAO()\n try:\n year = year_dao.find_by_slug(year_slug)\n public_galleries = list(filter(lambda gallery: not gallery.private, year.galleries))\n return ({'year': year_dao.serialize(year_slug), 'public_galleries': [gallery.slug for gallery in public_galleries]}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n year_dao = YearDAO()\n if current_user.admin:\n try:\n year_dao.delete_detaching_galleries(year_slug)\n return ({'msg': 'year deleted'}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n return ({'msg': 'not admin'}, 403)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000053", "length_bytes": 20155, "license_type": "permissive", "methods": [{"docstring": "Get the list of public galleries of a given year", "name": "get", "signature": "def get(self, year_slug)"}, {"docstring": "Delete a given year", "name": "delete", "signature": "def delete(self, year_slug)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000344", "prompt": "Implement the Python class `Year` described below.\n\nClass description:\nImplement the Year class.\n\nMethod signatures and docstrings:\n- def get(self, year_slug): Get the list of public galleries of a given year\n- def delete(self, year_slug): Delete a given year", "prompted_full_text": "Implement the Python class `Year` described below.\n\nClass description:\nImplement the Year class.\n\nMethod signatures and docstrings:\n- def get(self, year_slug): Get the list of public galleries of a given year\n- def delete(self, year_slug): Delete a given year\n\n<|skeleton|>\nclass Year:\n\n def get(self, year_slug):\n \"\"\"Get the list of public galleries of a given year\"\"\"\n <|body_0|>\n\n def delete(self, year_slug):\n \"\"\"Delete a given year\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n year_dao = YearDAO()\n try:\n year = year_dao.find_by_slug(year_slug)\n public_galleries = list(filter(lambda gallery: not gallery.private, year.galleries))\n return ({'year': year_dao.serialize(year_slug), 'public_galleries': [gallery.slug for gallery in public_galleries]}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n year_dao = YearDAO()\n if current_user.admin:\n try:\n year_dao.delete_detaching_galleries(year_slug)\n return ({'msg': 'year deleted'}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n return ({'msg': 'not admin'}, 403)\n<|end_body_1|>\n", "revision_id": "2a1b53d5fa07621fa3e41b10e26af9dd32b0e874", "skeleton": "<|skeleton|>\nclass Year:\n\n def get(self, year_slug):\n \"\"\"Get the list of public galleries of a given year\"\"\"\n <|body_0|>\n\n def delete(self, year_slug):\n \"\"\"Delete a given year\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Year:\n def get(self, year_slug):\n \"\"\"Get the list of public galleries of a given year\"\"\"\n year_dao = YearDAO()\n try:\n year = year_dao.find_by_slug(year_slug)\n public_galleries = list(filter(lambda gallery: not gallery.private, year.galleries))\n return ({'year': year_dao.serialize(year_slug), 'public_galleries': [gallery.slug for gallery in public_galleries]}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n\n def delete(self, year_slug):\n \"\"\"Delete a given year\"\"\"\n year_dao = YearDAO()\n if current_user.admin:\n try:\n year_dao.delete_detaching_galleries(year_slug)\n return ({'msg': 'year deleted'}, 200)\n except NoResultFound:\n return ({'msg': 'year not found'}, 404)\n return ({'msg': 'not admin'}, 403)\n", "source": "the_stack_v2_python_sparse", "source_path": "web/app/ponthe/api/private/routes.py", "source_repo": "adriensade/Galeries", "split": "test", "star_events_count": 0} {"blob_id": "cc771cea9d2940a44d87c2c7cc68ee3e1f4ada08", "bodies": ["assert rois.shape[1] == 7 and pts.shape[1] == 3\nif isinstance(out_size, int):\n out_x = out_y = out_z = out_size\nelse:\n assert len(out_size) == 3\n for k in range(3):\n assert isinstance(out_size[k], int)\n out_x, out_y, out_z = out_size\nnum_rois = rois.shape[0]\nnum_channels = pts_feature.shape[-1]\nnum_pts = pts.shape[0]\npooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))\nargmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)\npts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)\npool_method_map = {'max': 0, 'avg': 1}\npool_method = pool_method_map[pool_method]\nroiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)\nctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)\nreturn pooled_features", "pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward\ngrad_in = grad_out.new_zeros((num_pts, num_channels))\nroiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)\nreturn (None, None, grad_in, None, None, None)"], "bodies_text": "<|body_start_0|>\n assert rois.shape[1] == 7 and pts.shape[1] == 3\n if isinstance(out_size, int):\n out_x = out_y = out_z = out_size\n else:\n assert len(out_size) == 3\n for k in range(3):\n assert isinstance(out_size[k], int)\n out_x, out_y, out_z = out_size\n num_rois = rois.shape[0]\n num_channels = pts_feature.shape[-1]\n num_pts = pts.shape[0]\n pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))\n argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)\n pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)\n pool_method_map = {'max': 0, 'avg': 1}\n pool_method = pool_method_map[pool_method]\n roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)\n ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)\n return pooled_features\n<|end_body_0|>\n\n<|body_start_1|>\n pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward\n grad_in = grad_out.new_zeros((num_pts, num_channels))\n roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)\n return (None, None, grad_in, None, None, None)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RoIAwarePool3dFunction", "detected_licenses": ["Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RoIAwarePool3dFunction:\n\n def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):\n \"\"\"Args: ctx: rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center pts: (npoints, 3) pts_feature: (npoints, C) out_size: int or tuple, like 7 or (7, 7, 7) max_pts_each_voxel: pool_method: 'max' or 'avg' Returns: pooled_features: (N, out_x, out_y, out_z, C)\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out):\n \"\"\":param grad_out: (N, out_x, out_y, out_z, C) :return: grad_in: (npoints, C)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert rois.shape[1] == 7 and pts.shape[1] == 3\n if isinstance(out_size, int):\n out_x = out_y = out_z = out_size\n else:\n assert len(out_size) == 3\n for k in range(3):\n assert isinstance(out_size[k], int)\n out_x, out_y, out_z = out_size\n num_rois = rois.shape[0]\n num_channels = pts_feature.shape[-1]\n num_pts = pts.shape[0]\n pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))\n argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)\n pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)\n pool_method_map = {'max': 0, 'avg': 1}\n pool_method = pool_method_map[pool_method]\n roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)\n ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)\n return pooled_features\n<|end_body_0|>\n\n<|body_start_1|>\n pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward\n grad_in = grad_out.new_zeros((num_pts, num_channels))\n roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)\n return (None, None, grad_in, None, None, None)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000054", "length_bytes": 4075, "license_type": "permissive", "methods": [{"docstring": "Args: ctx: rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center pts: (npoints, 3) pts_feature: (npoints, C) out_size: int or tuple, like 7 or (7, 7, 7) max_pts_each_voxel: pool_method: 'max' or 'avg' Returns: pooled_features: (N, out_x, out_y, out_z, C)", "name": "forward", "signature": "def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method)"}, {"docstring": ":param grad_out: (N, out_x, out_y, out_z, C) :return: grad_in: (npoints, C)", "name": "backward", "signature": "def backward(ctx, grad_out)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_025144", "prompt": "Implement the Python class `RoIAwarePool3dFunction` described below.\n\nClass description:\nImplement the RoIAwarePool3dFunction class.\n\nMethod signatures and docstrings:\n- def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method): Args: ctx: rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center pts: (npoints, 3) pts_feature: (npoints, C) out_size: int or tuple, like 7 or (7, 7, 7) max_pts_each_voxel: pool_method: 'max' or 'avg' Returns: pooled_features: (N, out_x, out_y, out_z, C)\n- def backward(ctx, grad_out): :param grad_out: (N, out_x, out_y, out_z, C) :return: grad_in: (npoints, C)", "prompted_full_text": "Implement the Python class `RoIAwarePool3dFunction` described below.\n\nClass description:\nImplement the RoIAwarePool3dFunction class.\n\nMethod signatures and docstrings:\n- def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method): Args: ctx: rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center pts: (npoints, 3) pts_feature: (npoints, C) out_size: int or tuple, like 7 or (7, 7, 7) max_pts_each_voxel: pool_method: 'max' or 'avg' Returns: pooled_features: (N, out_x, out_y, out_z, C)\n- def backward(ctx, grad_out): :param grad_out: (N, out_x, out_y, out_z, C) :return: grad_in: (npoints, C)\n\n<|skeleton|>\nclass RoIAwarePool3dFunction:\n\n def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):\n \"\"\"Args: ctx: rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center pts: (npoints, 3) pts_feature: (npoints, C) out_size: int or tuple, like 7 or (7, 7, 7) max_pts_each_voxel: pool_method: 'max' or 'avg' Returns: pooled_features: (N, out_x, out_y, out_z, C)\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out):\n \"\"\":param grad_out: (N, out_x, out_y, out_z, C) :return: grad_in: (npoints, C)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert rois.shape[1] == 7 and pts.shape[1] == 3\n if isinstance(out_size, int):\n out_x = out_y = out_z = out_size\n else:\n assert len(out_size) == 3\n for k in range(3):\n assert isinstance(out_size[k], int)\n out_x, out_y, out_z = out_size\n num_rois = rois.shape[0]\n num_channels = pts_feature.shape[-1]\n num_pts = pts.shape[0]\n pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))\n argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)\n pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)\n pool_method_map = {'max': 0, 'avg': 1}\n pool_method = pool_method_map[pool_method]\n roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)\n ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)\n return pooled_features\n<|end_body_0|>\n\n<|body_start_1|>\n pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward\n grad_in = grad_out.new_zeros((num_pts, num_channels))\n roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)\n return (None, None, grad_in, None, None, None)\n<|end_body_1|>\n", "revision_id": "249d4a3122b4a52556fa124ff0411182b9cbede3", "skeleton": "<|skeleton|>\nclass RoIAwarePool3dFunction:\n\n def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):\n \"\"\"Args: ctx: rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center pts: (npoints, 3) pts_feature: (npoints, C) out_size: int or tuple, like 7 or (7, 7, 7) max_pts_each_voxel: pool_method: 'max' or 'avg' Returns: pooled_features: (N, out_x, out_y, out_z, C)\"\"\"\n <|body_0|>\n\n def backward(ctx, grad_out):\n \"\"\":param grad_out: (N, out_x, out_y, out_z, C) :return: grad_in: (npoints, C)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RoIAwarePool3dFunction:\n def forward(ctx, rois, pts, pts_feature, out_size, max_pts_each_voxel, pool_method):\n \"\"\"Args: ctx: rois: (N, 7) [x, y, z, dx, dy, dz, heading] (x, y, z) is the box center pts: (npoints, 3) pts_feature: (npoints, C) out_size: int or tuple, like 7 or (7, 7, 7) max_pts_each_voxel: pool_method: 'max' or 'avg' Returns: pooled_features: (N, out_x, out_y, out_z, C)\"\"\"\n assert rois.shape[1] == 7 and pts.shape[1] == 3\n if isinstance(out_size, int):\n out_x = out_y = out_z = out_size\n else:\n assert len(out_size) == 3\n for k in range(3):\n assert isinstance(out_size[k], int)\n out_x, out_y, out_z = out_size\n num_rois = rois.shape[0]\n num_channels = pts_feature.shape[-1]\n num_pts = pts.shape[0]\n pooled_features = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels))\n argmax = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, num_channels), dtype=torch.int)\n pts_idx_of_voxels = pts_feature.new_zeros((num_rois, out_x, out_y, out_z, max_pts_each_voxel), dtype=torch.int)\n pool_method_map = {'max': 0, 'avg': 1}\n pool_method = pool_method_map[pool_method]\n roiaware_pool3d_cuda.forward(rois, pts, pts_feature, argmax, pts_idx_of_voxels, pooled_features, pool_method)\n ctx.roiaware_pool3d_for_backward = (pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels)\n return pooled_features\n\n def backward(ctx, grad_out):\n \"\"\":param grad_out: (N, out_x, out_y, out_z, C) :return: grad_in: (npoints, C)\"\"\"\n pts_idx_of_voxels, argmax, pool_method, num_pts, num_channels = ctx.roiaware_pool3d_for_backward\n grad_in = grad_out.new_zeros((num_pts, num_channels))\n roiaware_pool3d_cuda.backward(pts_idx_of_voxels, argmax, grad_out.contiguous(), grad_in, pool_method)\n return (None, None, grad_in, None, None, None)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/site_model/src/LidCamFusion/OpenPCDet/pcdet/ops/roiaware_pool3d/roiaware_pool3d_utils.py", "source_repo": "OrangeSodahub/CRLFnet", "split": "test", "star_events_count": 63} {"blob_id": "1208f0390d0d3650b192dcd5158d5b94fcacbc98", "bodies": ["self._engine = create_engine('sqlite:///a.db', echo=False)\nBase.metadata.drop_all(self._engine)\nBase.metadata.create_all(self._engine)\nself.__session = None", "if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\nreturn self.__session", "user = User(email=email, hashed_password=hashed_password)\nself._session.add(user)\nself._session.commit()\nreturn user", "if kwargs is None:\n raise InvalidRequestError\nfor k in kwargs.keys():\n if not hasattr(User, k):\n raise InvalidRequestError\ntry:\n user = self._session.query(User).filter_by(**kwargs).first()\nexcept InvalidRequestError:\n raise InvalidRequestError\nif user is None:\n raise NoResultFound\nelse:\n return user", "user = self.find_user_by(id=user_id)\nfor k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n else:\n setattr(user, k, v)\nself._session.commit()"], "bodies_text": "<|body_start_0|>\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n<|end_body_1|>\n\n<|body_start_2|>\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n<|end_body_2|>\n\n<|body_start_3|>\n if kwargs is None:\n raise InvalidRequestError\n for k in kwargs.keys():\n if not hasattr(User, k):\n raise InvalidRequestError\n try:\n user = self._session.query(User).filter_by(**kwargs).first()\n except InvalidRequestError:\n raise InvalidRequestError\n if user is None:\n raise NoResultFound\n else:\n return user\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n else:\n setattr(user, k, v)\n self._session.commit()\n<|end_body_4|>\n", "class_docstring": "Database class for SQLAlchemy", "class_name": "DB", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DB:\n \"\"\"Database class for SQLAlchemy\"\"\"\n\n def __init__(self):\n \"\"\"creates engine\"\"\"\n <|body_0|>\n\n def _session(self):\n \"\"\"creates a session\"\"\"\n <|body_1|>\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"This method saves a new user to the database\"\"\"\n <|body_2|>\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"This method takes in arbitrary keyword arguments and returns the first row found in the users table as filtered by the method’s input arguments.\"\"\"\n <|body_3|>\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"The method will use find_user_by to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n<|end_body_1|>\n\n<|body_start_2|>\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n<|end_body_2|>\n\n<|body_start_3|>\n if kwargs is None:\n raise InvalidRequestError\n for k in kwargs.keys():\n if not hasattr(User, k):\n raise InvalidRequestError\n try:\n user = self._session.query(User).filter_by(**kwargs).first()\n except InvalidRequestError:\n raise InvalidRequestError\n if user is None:\n raise NoResultFound\n else:\n return user\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n else:\n setattr(user, k, v)\n self._session.commit()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000055", "length_bytes": 2314, "license_type": "no_license", "methods": [{"docstring": "creates engine", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "creates a session", "name": "_session", "signature": "def _session(self)"}, {"docstring": "This method saves a new user to the database", "name": "add_user", "signature": "def add_user(self, email: str, hashed_password: str) -> User"}, {"docstring": "This method takes in arbitrary keyword arguments and returns the first row found in the users table as filtered by the method’s input arguments.", "name": "find_user_by", "signature": "def find_user_by(self, **kwargs) -> User"}, {"docstring": "The method will use find_user_by to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.", "name": "update_user", "signature": "def update_user(self, user_id: int, **kwargs) -> None"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_016683", "prompt": "Implement the Python class `DB` described below.\n\nClass description:\nDatabase class for SQLAlchemy\n\nMethod signatures and docstrings:\n- def __init__(self): creates engine\n- def _session(self): creates a session\n- def add_user(self, email: str, hashed_password: str) -> User: This method saves a new user to the database\n- def find_user_by(self, **kwargs) -> User: This method takes in arbitrary keyword arguments and returns the first row found in the users table as filtered by the method’s input arguments.\n- def update_user(self, user_id: int, **kwargs) -> None: The method will use find_user_by to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.", "prompted_full_text": "Implement the Python class `DB` described below.\n\nClass description:\nDatabase class for SQLAlchemy\n\nMethod signatures and docstrings:\n- def __init__(self): creates engine\n- def _session(self): creates a session\n- def add_user(self, email: str, hashed_password: str) -> User: This method saves a new user to the database\n- def find_user_by(self, **kwargs) -> User: This method takes in arbitrary keyword arguments and returns the first row found in the users table as filtered by the method’s input arguments.\n- def update_user(self, user_id: int, **kwargs) -> None: The method will use find_user_by to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.\n\n<|skeleton|>\nclass DB:\n \"\"\"Database class for SQLAlchemy\"\"\"\n\n def __init__(self):\n \"\"\"creates engine\"\"\"\n <|body_0|>\n\n def _session(self):\n \"\"\"creates a session\"\"\"\n <|body_1|>\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"This method saves a new user to the database\"\"\"\n <|body_2|>\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"This method takes in arbitrary keyword arguments and returns the first row found in the users table as filtered by the method’s input arguments.\"\"\"\n <|body_3|>\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"The method will use find_user_by to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n<|end_body_1|>\n\n<|body_start_2|>\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n<|end_body_2|>\n\n<|body_start_3|>\n if kwargs is None:\n raise InvalidRequestError\n for k in kwargs.keys():\n if not hasattr(User, k):\n raise InvalidRequestError\n try:\n user = self._session.query(User).filter_by(**kwargs).first()\n except InvalidRequestError:\n raise InvalidRequestError\n if user is None:\n raise NoResultFound\n else:\n return user\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n else:\n setattr(user, k, v)\n self._session.commit()\n<|end_body_4|>\n", "revision_id": "2ab609541ff8b45cdc923c24d629f160ddc6f3cf", "skeleton": "<|skeleton|>\nclass DB:\n \"\"\"Database class for SQLAlchemy\"\"\"\n\n def __init__(self):\n \"\"\"creates engine\"\"\"\n <|body_0|>\n\n def _session(self):\n \"\"\"creates a session\"\"\"\n <|body_1|>\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"This method saves a new user to the database\"\"\"\n <|body_2|>\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"This method takes in arbitrary keyword arguments and returns the first row found in the users table as filtered by the method’s input arguments.\"\"\"\n <|body_3|>\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"The method will use find_user_by to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DB:\n \"\"\"Database class for SQLAlchemy\"\"\"\n\n def __init__(self):\n \"\"\"creates engine\"\"\"\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n\n def _session(self):\n \"\"\"creates a session\"\"\"\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"This method saves a new user to the database\"\"\"\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"This method takes in arbitrary keyword arguments and returns the first row found in the users table as filtered by the method’s input arguments.\"\"\"\n if kwargs is None:\n raise InvalidRequestError\n for k in kwargs.keys():\n if not hasattr(User, k):\n raise InvalidRequestError\n try:\n user = self._session.query(User).filter_by(**kwargs).first()\n except InvalidRequestError:\n raise InvalidRequestError\n if user is None:\n raise NoResultFound\n else:\n return user\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"The method will use find_user_by to locate the user to update, then will update the user’s attributes as passed in the method’s arguments then commit changes to the database.\"\"\"\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n else:\n setattr(user, k, v)\n self._session.commit()\n", "source": "the_stack_v2_python_sparse", "source_path": "0x08-user_authentication_service/db.py", "source_repo": "MatriMariem/holbertonschool-web_back_end", "split": "test", "star_events_count": 0} {"blob_id": "47c0253fd5635b42d8349d3bbb30778fec40d996", "bodies": ["self.dw = dilation_width\nself.ow = opening_width\nself.normalize = normalize\nself.hb = handle_borders\nself.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)", "self.instance.computeSaliency(step_size)\nout_m = self.instance.getSaliencyMap()\nreturn np.array(out_m)", "if not dilation_width:\n dilation_width = self.dw\nif not opening_width:\n opening_width = self.ow\nif not normalize:\n normalize = self.normalize\nif not handle_borders:\n handle_borders = self.hb\nself.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)"], "bodies_text": "<|body_start_0|>\n self.dw = dilation_width\n self.ow = opening_width\n self.normalize = normalize\n self.hb = handle_borders\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n<|end_body_0|>\n\n<|body_start_1|>\n self.instance.computeSaliency(step_size)\n out_m = self.instance.getSaliencyMap()\n return np.array(out_m)\n<|end_body_1|>\n\n<|body_start_2|>\n if not dilation_width:\n dilation_width = self.dw\n if not opening_width:\n opening_width = self.ow\n if not normalize:\n normalize = self.normalize\n if not handle_borders:\n handle_borders = self.hb\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n<|end_body_2|>\n", "class_docstring": "Computes Saliency map using Boolean map based saliency approach as described in: \"Saliency Detection: A Boolean Map Approach\", Jianming Zhang, Stan Sclaroff, ICCV, 2013", "class_name": "BMS", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BMS:\n \"\"\"Computes Saliency map using Boolean map based saliency approach as described in: \"Saliency Detection: A Boolean Map Approach\", Jianming Zhang, Stan Sclaroff, ICCV, 2013\"\"\"\n\n def __init__(self, src, dilation_width=7, opening_width=5, normalize=True, handle_borders=True):\n \"\"\"Initialize the Boolean Map Saliency object :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders\"\"\"\n <|body_0|>\n\n def get_saliency_map(self, step_size=8):\n \"\"\"Returns a Salincy map of the image :param step_size: step size for sampling the threshold between 0 to 255 :return: saliency map image\"\"\"\n <|body_1|>\n\n def refresh(self, src, dilation_width=None, opening_width=None, normalize=None, handle_borders=None):\n \"\"\"Update the source image, and optionally the algorithm parameters :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders :return: None, use get_saliency_map to get the saliency map for the new image\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dw = dilation_width\n self.ow = opening_width\n self.normalize = normalize\n self.hb = handle_borders\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n<|end_body_0|>\n\n<|body_start_1|>\n self.instance.computeSaliency(step_size)\n out_m = self.instance.getSaliencyMap()\n return np.array(out_m)\n<|end_body_1|>\n\n<|body_start_2|>\n if not dilation_width:\n dilation_width = self.dw\n if not opening_width:\n opening_width = self.ow\n if not normalize:\n normalize = self.normalize\n if not handle_borders:\n handle_borders = self.hb\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000056", "length_bytes": 2554, "license_type": "no_license", "methods": [{"docstring": "Initialize the Boolean Map Saliency object :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders", "name": "__init__", "signature": "def __init__(self, src, dilation_width=7, opening_width=5, normalize=True, handle_borders=True)"}, {"docstring": "Returns a Salincy map of the image :param step_size: step size for sampling the threshold between 0 to 255 :return: saliency map image", "name": "get_saliency_map", "signature": "def get_saliency_map(self, step_size=8)"}, {"docstring": "Update the source image, and optionally the algorithm parameters :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders :return: None, use get_saliency_map to get the saliency map for the new image", "name": "refresh", "signature": "def refresh(self, src, dilation_width=None, opening_width=None, normalize=None, handle_borders=None)"}], "n_methods": 3, "prompt": "Implement the Python class `BMS` described below.\n\nClass description:\nComputes Saliency map using Boolean map based saliency approach as described in: \"Saliency Detection: A Boolean Map Approach\", Jianming Zhang, Stan Sclaroff, ICCV, 2013\n\nMethod signatures and docstrings:\n- def __init__(self, src, dilation_width=7, opening_width=5, normalize=True, handle_borders=True): Initialize the Boolean Map Saliency object :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders\n- def get_saliency_map(self, step_size=8): Returns a Salincy map of the image :param step_size: step size for sampling the threshold between 0 to 255 :return: saliency map image\n- def refresh(self, src, dilation_width=None, opening_width=None, normalize=None, handle_borders=None): Update the source image, and optionally the algorithm parameters :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders :return: None, use get_saliency_map to get the saliency map for the new image", "prompted_full_text": "Implement the Python class `BMS` described below.\n\nClass description:\nComputes Saliency map using Boolean map based saliency approach as described in: \"Saliency Detection: A Boolean Map Approach\", Jianming Zhang, Stan Sclaroff, ICCV, 2013\n\nMethod signatures and docstrings:\n- def __init__(self, src, dilation_width=7, opening_width=5, normalize=True, handle_borders=True): Initialize the Boolean Map Saliency object :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders\n- def get_saliency_map(self, step_size=8): Returns a Salincy map of the image :param step_size: step size for sampling the threshold between 0 to 255 :return: saliency map image\n- def refresh(self, src, dilation_width=None, opening_width=None, normalize=None, handle_borders=None): Update the source image, and optionally the algorithm parameters :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders :return: None, use get_saliency_map to get the saliency map for the new image\n\n<|skeleton|>\nclass BMS:\n \"\"\"Computes Saliency map using Boolean map based saliency approach as described in: \"Saliency Detection: A Boolean Map Approach\", Jianming Zhang, Stan Sclaroff, ICCV, 2013\"\"\"\n\n def __init__(self, src, dilation_width=7, opening_width=5, normalize=True, handle_borders=True):\n \"\"\"Initialize the Boolean Map Saliency object :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders\"\"\"\n <|body_0|>\n\n def get_saliency_map(self, step_size=8):\n \"\"\"Returns a Salincy map of the image :param step_size: step size for sampling the threshold between 0 to 255 :return: saliency map image\"\"\"\n <|body_1|>\n\n def refresh(self, src, dilation_width=None, opening_width=None, normalize=None, handle_borders=None):\n \"\"\"Update the source image, and optionally the algorithm parameters :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders :return: None, use get_saliency_map to get the saliency map for the new image\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dw = dilation_width\n self.ow = opening_width\n self.normalize = normalize\n self.hb = handle_borders\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n<|end_body_0|>\n\n<|body_start_1|>\n self.instance.computeSaliency(step_size)\n out_m = self.instance.getSaliencyMap()\n return np.array(out_m)\n<|end_body_1|>\n\n<|body_start_2|>\n if not dilation_width:\n dilation_width = self.dw\n if not opening_width:\n opening_width = self.ow\n if not normalize:\n normalize = self.normalize\n if not handle_borders:\n handle_borders = self.hb\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n<|end_body_2|>\n", "revision_id": "5d43b37930f56d51fe12f7335b6ad6a468188ab7", "skeleton": "<|skeleton|>\nclass BMS:\n \"\"\"Computes Saliency map using Boolean map based saliency approach as described in: \"Saliency Detection: A Boolean Map Approach\", Jianming Zhang, Stan Sclaroff, ICCV, 2013\"\"\"\n\n def __init__(self, src, dilation_width=7, opening_width=5, normalize=True, handle_borders=True):\n \"\"\"Initialize the Boolean Map Saliency object :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders\"\"\"\n <|body_0|>\n\n def get_saliency_map(self, step_size=8):\n \"\"\"Returns a Salincy map of the image :param step_size: step size for sampling the threshold between 0 to 255 :return: saliency map image\"\"\"\n <|body_1|>\n\n def refresh(self, src, dilation_width=None, opening_width=None, normalize=None, handle_borders=None):\n \"\"\"Update the source image, and optionally the algorithm parameters :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders :return: None, use get_saliency_map to get the saliency map for the new image\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BMS:\n \"\"\"Computes Saliency map using Boolean map based saliency approach as described in: \"Saliency Detection: A Boolean Map Approach\", Jianming Zhang, Stan Sclaroff, ICCV, 2013\"\"\"\n\n def __init__(self, src, dilation_width=7, opening_width=5, normalize=True, handle_borders=True):\n \"\"\"Initialize the Boolean Map Saliency object :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders\"\"\"\n self.dw = dilation_width\n self.ow = opening_width\n self.normalize = normalize\n self.hb = handle_borders\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n\n def get_saliency_map(self, step_size=8):\n \"\"\"Returns a Salincy map of the image :param step_size: step size for sampling the threshold between 0 to 255 :return: saliency map image\"\"\"\n self.instance.computeSaliency(step_size)\n out_m = self.instance.getSaliencyMap()\n return np.array(out_m)\n\n def refresh(self, src, dilation_width=None, opening_width=None, normalize=None, handle_borders=None):\n \"\"\"Update the source image, and optionally the algorithm parameters :param src: image array whose saliency map is to be computed :param dilation_width: kernel width of the dilation operation in pixels :param opening_width: kernel width of the opening operation in pixels :param normalize: enable normalization emaphasize attention maps with small active areas :param handle_borders: enable handling of borders :return: None, use get_saliency_map to get the saliency map for the new image\"\"\"\n if not dilation_width:\n dilation_width = self.dw\n if not opening_width:\n opening_width = self.ow\n if not normalize:\n normalize = self.normalize\n if not handle_borders:\n handle_borders = self.hb\n self.instance = bmsl.BMS(bmsl.Mat.from_array(src), dilation_width, opening_width, normalize, handle_borders)\n", "source": "the_stack_v2_python_sparse", "source_path": "a5/saliency/bms.py", "source_repo": "GuruMulay/attention-window-detection-tracking-captioning", "split": "test", "star_events_count": 0} {"blob_id": "d2378e3e6ef7ce981f71c03526e7f23da58e9336", "bodies": ["if not hasattr(self, 'metamom_fitter'):\n raise RuntimeError('you need to run fit_metamom successfully first')\nreturn self.metamom_fitter", "if fitter_type == 'metamom':\n fitter = self.get_metamom_fitter()\nelse:\n fitter = super(MetaMomBootstrapper, self).get_fitter(fitter_type)\nreturn fitter", "obs = self.mb_obs_list\nif not hasattr(self, 'psf_flux_res'):\n self.fit_gal_psf_flux()\nguesser = self._get_max_guesser(guess=guess, prior=prior, widths=guess_widths)\nrunner = MetaMomRunner(wt_gmix, obs, gal_model, pars, guesser, prior=prior, intpars=self.intpars, use_logpars=self.use_logpars)\nrunner.go(ntry=ntry)\nfitter = runner.fitter\nres = fitter.get_result()\nif res['flags'] != 0:\n raise BootGalFailure('failed to fit galaxy with maxlike')\nself.metamom_fitter = fitter"], "bodies_text": "<|body_start_0|>\n if not hasattr(self, 'metamom_fitter'):\n raise RuntimeError('you need to run fit_metamom successfully first')\n return self.metamom_fitter\n<|end_body_0|>\n\n<|body_start_1|>\n if fitter_type == 'metamom':\n fitter = self.get_metamom_fitter()\n else:\n fitter = super(MetaMomBootstrapper, self).get_fitter(fitter_type)\n return fitter\n<|end_body_1|>\n\n<|body_start_2|>\n obs = self.mb_obs_list\n if not hasattr(self, 'psf_flux_res'):\n self.fit_gal_psf_flux()\n guesser = self._get_max_guesser(guess=guess, prior=prior, widths=guess_widths)\n runner = MetaMomRunner(wt_gmix, obs, gal_model, pars, guesser, prior=prior, intpars=self.intpars, use_logpars=self.use_logpars)\n runner.go(ntry=ntry)\n fitter = runner.fitter\n res = fitter.get_result()\n if res['flags'] != 0:\n raise BootGalFailure('failed to fit galaxy with maxlike')\n self.metamom_fitter = fitter\n<|end_body_2|>\n", "class_docstring": "", "class_name": "MetaMomBootstrapper", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MetaMomBootstrapper:\n\n def get_metamom_fitter(self):\n \"\"\"get the maxlike fitter for the galaxy\"\"\"\n <|body_0|>\n\n def get_fitter(self, fitter_type):\n \"\"\"get fitter by name 'max' 'isample' etc.\"\"\"\n <|body_1|>\n\n def fit_metamom(self, wt_gmix, gal_model, pars, guess=None, guess_widths=None, prior=None, extra_priors=None, ntry=1):\n \"\"\"fit the galaxy. You must run fit_psf() successfully first\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not hasattr(self, 'metamom_fitter'):\n raise RuntimeError('you need to run fit_metamom successfully first')\n return self.metamom_fitter\n<|end_body_0|>\n\n<|body_start_1|>\n if fitter_type == 'metamom':\n fitter = self.get_metamom_fitter()\n else:\n fitter = super(MetaMomBootstrapper, self).get_fitter(fitter_type)\n return fitter\n<|end_body_1|>\n\n<|body_start_2|>\n obs = self.mb_obs_list\n if not hasattr(self, 'psf_flux_res'):\n self.fit_gal_psf_flux()\n guesser = self._get_max_guesser(guess=guess, prior=prior, widths=guess_widths)\n runner = MetaMomRunner(wt_gmix, obs, gal_model, pars, guesser, prior=prior, intpars=self.intpars, use_logpars=self.use_logpars)\n runner.go(ntry=ntry)\n fitter = runner.fitter\n res = fitter.get_result()\n if res['flags'] != 0:\n raise BootGalFailure('failed to fit galaxy with maxlike')\n self.metamom_fitter = fitter\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000057", "length_bytes": 9313, "license_type": "no_license", "methods": [{"docstring": "get the maxlike fitter for the galaxy", "name": "get_metamom_fitter", "signature": "def get_metamom_fitter(self)"}, {"docstring": "get fitter by name 'max' 'isample' etc.", "name": "get_fitter", "signature": "def get_fitter(self, fitter_type)"}, {"docstring": "fit the galaxy. You must run fit_psf() successfully first", "name": "fit_metamom", "signature": "def fit_metamom(self, wt_gmix, gal_model, pars, guess=None, guess_widths=None, prior=None, extra_priors=None, ntry=1)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_038752", "prompt": "Implement the Python class `MetaMomBootstrapper` described below.\n\nClass description:\nImplement the MetaMomBootstrapper class.\n\nMethod signatures and docstrings:\n- def get_metamom_fitter(self): get the maxlike fitter for the galaxy\n- def get_fitter(self, fitter_type): get fitter by name 'max' 'isample' etc.\n- def fit_metamom(self, wt_gmix, gal_model, pars, guess=None, guess_widths=None, prior=None, extra_priors=None, ntry=1): fit the galaxy. You must run fit_psf() successfully first", "prompted_full_text": "Implement the Python class `MetaMomBootstrapper` described below.\n\nClass description:\nImplement the MetaMomBootstrapper class.\n\nMethod signatures and docstrings:\n- def get_metamom_fitter(self): get the maxlike fitter for the galaxy\n- def get_fitter(self, fitter_type): get fitter by name 'max' 'isample' etc.\n- def fit_metamom(self, wt_gmix, gal_model, pars, guess=None, guess_widths=None, prior=None, extra_priors=None, ntry=1): fit the galaxy. You must run fit_psf() successfully first\n\n<|skeleton|>\nclass MetaMomBootstrapper:\n\n def get_metamom_fitter(self):\n \"\"\"get the maxlike fitter for the galaxy\"\"\"\n <|body_0|>\n\n def get_fitter(self, fitter_type):\n \"\"\"get fitter by name 'max' 'isample' etc.\"\"\"\n <|body_1|>\n\n def fit_metamom(self, wt_gmix, gal_model, pars, guess=None, guess_widths=None, prior=None, extra_priors=None, ntry=1):\n \"\"\"fit the galaxy. You must run fit_psf() successfully first\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not hasattr(self, 'metamom_fitter'):\n raise RuntimeError('you need to run fit_metamom successfully first')\n return self.metamom_fitter\n<|end_body_0|>\n\n<|body_start_1|>\n if fitter_type == 'metamom':\n fitter = self.get_metamom_fitter()\n else:\n fitter = super(MetaMomBootstrapper, self).get_fitter(fitter_type)\n return fitter\n<|end_body_1|>\n\n<|body_start_2|>\n obs = self.mb_obs_list\n if not hasattr(self, 'psf_flux_res'):\n self.fit_gal_psf_flux()\n guesser = self._get_max_guesser(guess=guess, prior=prior, widths=guess_widths)\n runner = MetaMomRunner(wt_gmix, obs, gal_model, pars, guesser, prior=prior, intpars=self.intpars, use_logpars=self.use_logpars)\n runner.go(ntry=ntry)\n fitter = runner.fitter\n res = fitter.get_result()\n if res['flags'] != 0:\n raise BootGalFailure('failed to fit galaxy with maxlike')\n self.metamom_fitter = fitter\n<|end_body_2|>\n", "revision_id": "3755ad4a08d8e4d67e4594faf6ec5dd62d0b2792", "skeleton": "<|skeleton|>\nclass MetaMomBootstrapper:\n\n def get_metamom_fitter(self):\n \"\"\"get the maxlike fitter for the galaxy\"\"\"\n <|body_0|>\n\n def get_fitter(self, fitter_type):\n \"\"\"get fitter by name 'max' 'isample' etc.\"\"\"\n <|body_1|>\n\n def fit_metamom(self, wt_gmix, gal_model, pars, guess=None, guess_widths=None, prior=None, extra_priors=None, ntry=1):\n \"\"\"fit the galaxy. You must run fit_psf() successfully first\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MetaMomBootstrapper:\n def get_metamom_fitter(self):\n \"\"\"get the maxlike fitter for the galaxy\"\"\"\n if not hasattr(self, 'metamom_fitter'):\n raise RuntimeError('you need to run fit_metamom successfully first')\n return self.metamom_fitter\n\n def get_fitter(self, fitter_type):\n \"\"\"get fitter by name 'max' 'isample' etc.\"\"\"\n if fitter_type == 'metamom':\n fitter = self.get_metamom_fitter()\n else:\n fitter = super(MetaMomBootstrapper, self).get_fitter(fitter_type)\n return fitter\n\n def fit_metamom(self, wt_gmix, gal_model, pars, guess=None, guess_widths=None, prior=None, extra_priors=None, ntry=1):\n \"\"\"fit the galaxy. You must run fit_psf() successfully first\"\"\"\n obs = self.mb_obs_list\n if not hasattr(self, 'psf_flux_res'):\n self.fit_gal_psf_flux()\n guesser = self._get_max_guesser(guess=guess, prior=prior, widths=guess_widths)\n runner = MetaMomRunner(wt_gmix, obs, gal_model, pars, guesser, prior=prior, intpars=self.intpars, use_logpars=self.use_logpars)\n runner.go(ntry=ntry)\n fitter = runner.fitter\n res = fitter.get_result()\n if res['flags'] != 0:\n raise BootGalFailure('failed to fit galaxy with maxlike')\n self.metamom_fitter = fitter\n", "source": "the_stack_v2_python_sparse", "source_path": "nsim/bootstrappers.py", "source_repo": "esheldon/nsim", "split": "test", "star_events_count": 1} {"blob_id": "aebcf1e3210d4f79b04d859d3f2c007bee1e5f23", "bodies": ["startTime = datetime.datetime.now()\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\nprint('Fetching Boston population data from Data Mechanics resource')\ncolName = 'ajr10_chamathd_williami.neighborhood_pop_boston'\nurl = 'http://datamechanics.io/data/ajr10_chamathd_williami/boston_neighborhood_census.json'\nresponse = requests.get(url).text\nr = json.loads(response)\nrepo.dropCollection(colName)\nrepo.createCollection(colName)\nprint('Inserting JSON data into collection', colName)\nrepo[colName].insert_many(r['neighborhoods'])\nprint('Finished writing data to', colName)\nprint()\nprint('Fetching Cambridge population data from Cambridge Open Data')\ncolName = 'ajr10_chamathd_williami.neighborhood_pop_cambridge'\nsocrataClient = sodapy.Socrata('data.cambridgema.gov', None)\nresponse = socrataClient.get('vacj-bzri', limit=50)\nr = json.loads(json.dumps(response, sort_keys=True, indent=2))\nrepo.dropCollection(colName)\nrepo.createCollection(colName)\nprint('Inserting JSON data into collection', colName)\nrepo[colName].insert_many(r)\nprint('Finished writing data to', colName)\nprint()\nrepo.logout()\nendTime = datetime.datetime.now()\nreturn {'start': startTime, 'end': endTime}", "client = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\ndoc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\ndoc.add_namespace('dat', 'http://datamechanics.io/data/')\ndoc.add_namespace('ont', 'http://datamechanics.io/ontology#')\ndoc.add_namespace('log', 'http://datamechanics.io/log/')\ndoc.add_namespace('cma', 'https://data.cambridgema.gov/resource/')\nthis_script = doc.agent('alg:ajr10_chamathd_williami#fetch_neighborhood_pop_data', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\nneighborhood_pop_boston_res = doc.entity('dat:ajr10_chamathd_williami/boston_neighborhood_census.json', {'prov:label': 'Boston Neighborhood Census', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\nneighborhood_pop_cambridge_res = doc.entity('cma:vacj-bzri', {'prov:label': '2010 Cambridge Census Data by Neighborhood', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\nget_neighborhood_pop_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\nget_neighborhood_pop_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\ndoc.wasAssociatedWith(get_neighborhood_pop_boston, this_script)\ndoc.wasAssociatedWith(get_neighborhood_pop_cambridge, this_script)\ndoc.usage(get_neighborhood_pop_boston, neighborhood_pop_boston_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Boston'})\ndoc.usage(get_neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Cambridge'})\nneighborhood_pop_boston = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_boston', {prov.model.PROV_LABEL: 'Boston Open Budget - Neighborhood Boundaries', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAttributedTo(neighborhood_pop_boston, this_script)\ndoc.wasGeneratedBy(neighborhood_pop_boston, get_neighborhood_pop_boston, endTime)\ndoc.wasDerivedFrom(neighborhood_pop_boston, neighborhood_pop_boston_res, get_neighborhood_pop_boston, get_neighborhood_pop_boston, get_neighborhood_pop_boston)\nneighborhood_pop_cambridge = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_cambridge', {prov.model.PROV_LABEL: 'Cambridge Neighborhood Polygons', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAttributedTo(neighborhood_pop_cambridge, this_script)\ndoc.wasGeneratedBy(neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, endTime)\ndoc.wasDerivedFrom(neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge)\nrepo.logout()\nreturn doc"], "bodies_text": "<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n print('Fetching Boston population data from Data Mechanics resource')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_boston'\n url = 'http://datamechanics.io/data/ajr10_chamathd_williami/boston_neighborhood_census.json'\n response = requests.get(url).text\n r = json.loads(response)\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r['neighborhoods'])\n print('Finished writing data to', colName)\n print()\n print('Fetching Cambridge population data from Cambridge Open Data')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_cambridge'\n socrataClient = sodapy.Socrata('data.cambridgema.gov', None)\n response = socrataClient.get('vacj-bzri', limit=50)\n r = json.loads(json.dumps(response, sort_keys=True, indent=2))\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r)\n print('Finished writing data to', colName)\n print()\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('cma', 'https://data.cambridgema.gov/resource/')\n this_script = doc.agent('alg:ajr10_chamathd_williami#fetch_neighborhood_pop_data', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n neighborhood_pop_boston_res = doc.entity('dat:ajr10_chamathd_williami/boston_neighborhood_census.json', {'prov:label': 'Boston Neighborhood Census', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n neighborhood_pop_cambridge_res = doc.entity('cma:vacj-bzri', {'prov:label': '2010 Cambridge Census Data by Neighborhood', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_neighborhood_pop_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_neighborhood_pop_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_neighborhood_pop_boston, this_script)\n doc.wasAssociatedWith(get_neighborhood_pop_cambridge, this_script)\n doc.usage(get_neighborhood_pop_boston, neighborhood_pop_boston_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Boston'})\n doc.usage(get_neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Cambridge'})\n neighborhood_pop_boston = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_boston', {prov.model.PROV_LABEL: 'Boston Open Budget - Neighborhood Boundaries', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_boston, this_script)\n doc.wasGeneratedBy(neighborhood_pop_boston, get_neighborhood_pop_boston, endTime)\n doc.wasDerivedFrom(neighborhood_pop_boston, neighborhood_pop_boston_res, get_neighborhood_pop_boston, get_neighborhood_pop_boston, get_neighborhood_pop_boston)\n neighborhood_pop_cambridge = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_cambridge', {prov.model.PROV_LABEL: 'Cambridge Neighborhood Polygons', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_cambridge, this_script)\n doc.wasGeneratedBy(neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, endTime)\n doc.wasDerivedFrom(neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge)\n repo.logout()\n return doc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "fetch_neighborhood_pop_data", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass fetch_neighborhood_pop_data:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets for the MongoDB collection.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n print('Fetching Boston population data from Data Mechanics resource')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_boston'\n url = 'http://datamechanics.io/data/ajr10_chamathd_williami/boston_neighborhood_census.json'\n response = requests.get(url).text\n r = json.loads(response)\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r['neighborhoods'])\n print('Finished writing data to', colName)\n print()\n print('Fetching Cambridge population data from Cambridge Open Data')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_cambridge'\n socrataClient = sodapy.Socrata('data.cambridgema.gov', None)\n response = socrataClient.get('vacj-bzri', limit=50)\n r = json.loads(json.dumps(response, sort_keys=True, indent=2))\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r)\n print('Finished writing data to', colName)\n print()\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('cma', 'https://data.cambridgema.gov/resource/')\n this_script = doc.agent('alg:ajr10_chamathd_williami#fetch_neighborhood_pop_data', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n neighborhood_pop_boston_res = doc.entity('dat:ajr10_chamathd_williami/boston_neighborhood_census.json', {'prov:label': 'Boston Neighborhood Census', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n neighborhood_pop_cambridge_res = doc.entity('cma:vacj-bzri', {'prov:label': '2010 Cambridge Census Data by Neighborhood', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_neighborhood_pop_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_neighborhood_pop_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_neighborhood_pop_boston, this_script)\n doc.wasAssociatedWith(get_neighborhood_pop_cambridge, this_script)\n doc.usage(get_neighborhood_pop_boston, neighborhood_pop_boston_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Boston'})\n doc.usage(get_neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Cambridge'})\n neighborhood_pop_boston = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_boston', {prov.model.PROV_LABEL: 'Boston Open Budget - Neighborhood Boundaries', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_boston, this_script)\n doc.wasGeneratedBy(neighborhood_pop_boston, get_neighborhood_pop_boston, endTime)\n doc.wasDerivedFrom(neighborhood_pop_boston, neighborhood_pop_boston_res, get_neighborhood_pop_boston, get_neighborhood_pop_boston, get_neighborhood_pop_boston)\n neighborhood_pop_cambridge = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_cambridge', {prov.model.PROV_LABEL: 'Cambridge Neighborhood Polygons', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_cambridge, this_script)\n doc.wasGeneratedBy(neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, endTime)\n doc.wasDerivedFrom(neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge)\n repo.logout()\n return doc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000058", "length_bytes": 6063, "license_type": "no_license", "methods": [{"docstring": "Retrieve some data sets for the MongoDB collection.", "name": "execute", "signature": "def execute(trial=False)"}, {"docstring": "Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "name": "provenance", "signature": "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052743", "prompt": "Implement the Python class `fetch_neighborhood_pop_data` described below.\n\nClass description:\nImplement the fetch_neighborhood_pop_data class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets for the MongoDB collection.\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "prompted_full_text": "Implement the Python class `fetch_neighborhood_pop_data` described below.\n\nClass description:\nImplement the fetch_neighborhood_pop_data class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets for the MongoDB collection.\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\n\n<|skeleton|>\nclass fetch_neighborhood_pop_data:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets for the MongoDB collection.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n print('Fetching Boston population data from Data Mechanics resource')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_boston'\n url = 'http://datamechanics.io/data/ajr10_chamathd_williami/boston_neighborhood_census.json'\n response = requests.get(url).text\n r = json.loads(response)\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r['neighborhoods'])\n print('Finished writing data to', colName)\n print()\n print('Fetching Cambridge population data from Cambridge Open Data')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_cambridge'\n socrataClient = sodapy.Socrata('data.cambridgema.gov', None)\n response = socrataClient.get('vacj-bzri', limit=50)\n r = json.loads(json.dumps(response, sort_keys=True, indent=2))\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r)\n print('Finished writing data to', colName)\n print()\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('cma', 'https://data.cambridgema.gov/resource/')\n this_script = doc.agent('alg:ajr10_chamathd_williami#fetch_neighborhood_pop_data', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n neighborhood_pop_boston_res = doc.entity('dat:ajr10_chamathd_williami/boston_neighborhood_census.json', {'prov:label': 'Boston Neighborhood Census', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n neighborhood_pop_cambridge_res = doc.entity('cma:vacj-bzri', {'prov:label': '2010 Cambridge Census Data by Neighborhood', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_neighborhood_pop_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_neighborhood_pop_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_neighborhood_pop_boston, this_script)\n doc.wasAssociatedWith(get_neighborhood_pop_cambridge, this_script)\n doc.usage(get_neighborhood_pop_boston, neighborhood_pop_boston_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Boston'})\n doc.usage(get_neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Cambridge'})\n neighborhood_pop_boston = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_boston', {prov.model.PROV_LABEL: 'Boston Open Budget - Neighborhood Boundaries', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_boston, this_script)\n doc.wasGeneratedBy(neighborhood_pop_boston, get_neighborhood_pop_boston, endTime)\n doc.wasDerivedFrom(neighborhood_pop_boston, neighborhood_pop_boston_res, get_neighborhood_pop_boston, get_neighborhood_pop_boston, get_neighborhood_pop_boston)\n neighborhood_pop_cambridge = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_cambridge', {prov.model.PROV_LABEL: 'Cambridge Neighborhood Polygons', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_cambridge, this_script)\n doc.wasGeneratedBy(neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, endTime)\n doc.wasDerivedFrom(neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge)\n repo.logout()\n return doc\n<|end_body_1|>\n", "revision_id": "0df485d0469c5451ebdcd684bed2a0960ba3ab84", "skeleton": "<|skeleton|>\nclass fetch_neighborhood_pop_data:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets for the MongoDB collection.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class fetch_neighborhood_pop_data:\n def execute(trial=False):\n \"\"\"Retrieve some data sets for the MongoDB collection.\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n print('Fetching Boston population data from Data Mechanics resource')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_boston'\n url = 'http://datamechanics.io/data/ajr10_chamathd_williami/boston_neighborhood_census.json'\n response = requests.get(url).text\n r = json.loads(response)\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r['neighborhoods'])\n print('Finished writing data to', colName)\n print()\n print('Fetching Cambridge population data from Cambridge Open Data')\n colName = 'ajr10_chamathd_williami.neighborhood_pop_cambridge'\n socrataClient = sodapy.Socrata('data.cambridgema.gov', None)\n response = socrataClient.get('vacj-bzri', limit=50)\n r = json.loads(json.dumps(response, sort_keys=True, indent=2))\n repo.dropCollection(colName)\n repo.createCollection(colName)\n print('Inserting JSON data into collection', colName)\n repo[colName].insert_many(r)\n print('Finished writing data to', colName)\n print()\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_chamathd_williami', 'ajr10_chamathd_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('cma', 'https://data.cambridgema.gov/resource/')\n this_script = doc.agent('alg:ajr10_chamathd_williami#fetch_neighborhood_pop_data', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n neighborhood_pop_boston_res = doc.entity('dat:ajr10_chamathd_williami/boston_neighborhood_census.json', {'prov:label': 'Boston Neighborhood Census', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n neighborhood_pop_cambridge_res = doc.entity('cma:vacj-bzri', {'prov:label': '2010 Cambridge Census Data by Neighborhood', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_neighborhood_pop_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_neighborhood_pop_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_neighborhood_pop_boston, this_script)\n doc.wasAssociatedWith(get_neighborhood_pop_cambridge, this_script)\n doc.usage(get_neighborhood_pop_boston, neighborhood_pop_boston_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Boston'})\n doc.usage(get_neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Neighborhood+Pop+Cambridge'})\n neighborhood_pop_boston = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_boston', {prov.model.PROV_LABEL: 'Boston Open Budget - Neighborhood Boundaries', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_boston, this_script)\n doc.wasGeneratedBy(neighborhood_pop_boston, get_neighborhood_pop_boston, endTime)\n doc.wasDerivedFrom(neighborhood_pop_boston, neighborhood_pop_boston_res, get_neighborhood_pop_boston, get_neighborhood_pop_boston, get_neighborhood_pop_boston)\n neighborhood_pop_cambridge = doc.entity('dat:ajr10_chamathd_williami#neighborhood_pop_cambridge', {prov.model.PROV_LABEL: 'Cambridge Neighborhood Polygons', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(neighborhood_pop_cambridge, this_script)\n doc.wasGeneratedBy(neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, endTime)\n doc.wasDerivedFrom(neighborhood_pop_cambridge, neighborhood_pop_cambridge_res, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge, get_neighborhood_pop_cambridge)\n repo.logout()\n return doc\n", "source": "the_stack_v2_python_sparse", "source_path": "ajr10_chamathd_williami/fetch_neighborhood_pop_data.py", "source_repo": "lingyigu/course-2017-spr-proj", "split": "test", "star_events_count": 0} {"blob_id": "a7df8f3d5e3efa03bbe97e531dc4549b6356831e", "bodies": ["if not identifier:\n raise ValueError('A User must have an identifier')\nuser = self.model(identifier=identifier, **extra_fields)\nuser.set_password(password)\nuser.save(using=self._db)\nreturn user", "user = self.create_user(identifier, password=password, **extra_fields)\nuser.is_staff = True\nuser.is_superuser = True\nuser.save(using=self._db)\nreturn user"], "bodies_text": "<|body_start_0|>\n if not identifier:\n raise ValueError('A User must have an identifier')\n user = self.model(identifier=identifier, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(identifier, password=password, **extra_fields)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "class_docstring": "", "class_name": "CalendloUserManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CalendloUserManager:\n\n def create_user(self, identifier, password=None, **extra_fields):\n \"\"\"Creates and saves a User with the given identifier and password\"\"\"\n <|body_0|>\n\n def create_superuser(self, identifier, password, **extra_fields):\n \"\"\"Creates and saves a superuser with the given identifier and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not identifier:\n raise ValueError('A User must have an identifier')\n user = self.model(identifier=identifier, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(identifier, password=password, **extra_fields)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000059", "length_bytes": 980, "license_type": "no_license", "methods": [{"docstring": "Creates and saves a User with the given identifier and password", "name": "create_user", "signature": "def create_user(self, identifier, password=None, **extra_fields)"}, {"docstring": "Creates and saves a superuser with the given identifier and password.", "name": "create_superuser", "signature": "def create_superuser(self, identifier, password, **extra_fields)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_042753", "prompt": "Implement the Python class `CalendloUserManager` described below.\n\nClass description:\nImplement the CalendloUserManager class.\n\nMethod signatures and docstrings:\n- def create_user(self, identifier, password=None, **extra_fields): Creates and saves a User with the given identifier and password\n- def create_superuser(self, identifier, password, **extra_fields): Creates and saves a superuser with the given identifier and password.", "prompted_full_text": "Implement the Python class `CalendloUserManager` described below.\n\nClass description:\nImplement the CalendloUserManager class.\n\nMethod signatures and docstrings:\n- def create_user(self, identifier, password=None, **extra_fields): Creates and saves a User with the given identifier and password\n- def create_superuser(self, identifier, password, **extra_fields): Creates and saves a superuser with the given identifier and password.\n\n<|skeleton|>\nclass CalendloUserManager:\n\n def create_user(self, identifier, password=None, **extra_fields):\n \"\"\"Creates and saves a User with the given identifier and password\"\"\"\n <|body_0|>\n\n def create_superuser(self, identifier, password, **extra_fields):\n \"\"\"Creates and saves a superuser with the given identifier and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not identifier:\n raise ValueError('A User must have an identifier')\n user = self.model(identifier=identifier, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n user = self.create_user(identifier, password=password, **extra_fields)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user\n<|end_body_1|>\n", "revision_id": "cbc0b44dbec6a65bb8daab3745a5b116177df655", "skeleton": "<|skeleton|>\nclass CalendloUserManager:\n\n def create_user(self, identifier, password=None, **extra_fields):\n \"\"\"Creates and saves a User with the given identifier and password\"\"\"\n <|body_0|>\n\n def create_superuser(self, identifier, password, **extra_fields):\n \"\"\"Creates and saves a superuser with the given identifier and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CalendloUserManager:\n def create_user(self, identifier, password=None, **extra_fields):\n \"\"\"Creates and saves a User with the given identifier and password\"\"\"\n if not identifier:\n raise ValueError('A User must have an identifier')\n user = self.model(identifier=identifier, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n return user\n\n def create_superuser(self, identifier, password, **extra_fields):\n \"\"\"Creates and saves a superuser with the given identifier and password.\"\"\"\n user = self.create_user(identifier, password=password, **extra_fields)\n user.is_staff = True\n user.is_superuser = True\n user.save(using=self._db)\n return user\n", "source": "the_stack_v2_python_sparse", "source_path": "accounts/managers.py", "source_repo": "luvpreetsingh/calendlo", "split": "test", "star_events_count": 0} {"blob_id": "2afea47812e4ad1064bed85ce8232c7cec5493a4", "bodies": ["err_msg = \"penalty must be 'l1' or 'l2', but got: {}\".format(penalty)\nassert penalty in ['l2', 'l1'], err_msg\nself.beta = None\nself.gamma = gamma\nself.penalty = penalty\nself.fit_intercept = fit_intercept", "if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\nl_prev = np.inf\nself.beta = np.random.rand(X.shape[1])\nfor _ in range(int(max_iter)):\n y_pred = sigmoid(np.dot(X, self.beta))\n loss = self._NLL(X, y, y_pred)\n if l_prev - loss < tol:\n return\n l_prev = loss\n self.beta -= lr * self._NLL_grad(X, y, y_pred)", "N, M = X.shape\norder = 2 if self.penalty == 'l2' else 1\nnll = -np.log(y_pred[y == 1]).sum() - np.log(1 - y_pred[y == 0]).sum()\npenalty = 0.5 * self.gamma * np.linalg.norm(self.beta, ord=order) ** 2\nreturn (penalty + nll) / N", "N, M = X.shape\np = self.penalty\nbeta = self.beta\ngamma = self.gamma\nl1norm = lambda x: np.linalg.norm(x, 1)\nd_penalty = gamma * beta if p == 'l2' else gamma * l1norm(beta) * np.sign(beta)\nreturn -(np.dot(y - y_pred, X) + d_penalty) / N", "if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\nreturn sigmoid(np.dot(X, self.beta))"], "bodies_text": "<|body_start_0|>\n err_msg = \"penalty must be 'l1' or 'l2', but got: {}\".format(penalty)\n assert penalty in ['l2', 'l1'], err_msg\n self.beta = None\n self.gamma = gamma\n self.penalty = penalty\n self.fit_intercept = fit_intercept\n<|end_body_0|>\n\n<|body_start_1|>\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n l_prev = np.inf\n self.beta = np.random.rand(X.shape[1])\n for _ in range(int(max_iter)):\n y_pred = sigmoid(np.dot(X, self.beta))\n loss = self._NLL(X, y, y_pred)\n if l_prev - loss < tol:\n return\n l_prev = loss\n self.beta -= lr * self._NLL_grad(X, y, y_pred)\n<|end_body_1|>\n\n<|body_start_2|>\n N, M = X.shape\n order = 2 if self.penalty == 'l2' else 1\n nll = -np.log(y_pred[y == 1]).sum() - np.log(1 - y_pred[y == 0]).sum()\n penalty = 0.5 * self.gamma * np.linalg.norm(self.beta, ord=order) ** 2\n return (penalty + nll) / N\n<|end_body_2|>\n\n<|body_start_3|>\n N, M = X.shape\n p = self.penalty\n beta = self.beta\n gamma = self.gamma\n l1norm = lambda x: np.linalg.norm(x, 1)\n d_penalty = gamma * beta if p == 'l2' else gamma * l1norm(beta) * np.sign(beta)\n return -(np.dot(y - y_pred, X) + d_penalty) / N\n<|end_body_3|>\n\n<|body_start_4|>\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n return sigmoid(np.dot(X, self.beta))\n<|end_body_4|>\n", "class_docstring": "", "class_name": "LogisticRegression", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LogisticRegression:\n\n def __init__(self, penalty='l2', gamma=0, fit_intercept=True):\n \"\"\"A simple logistic regression model fit via gradient descent on the penalized negative log likelihood. Parameters ---------- penalty : {'l1', 'l2'} The type of regularization penalty to apply on the coefficients `beta`. Default is 'l2'. gamma : float in [0, 1] The regularization weight. Larger values correspond to larger regularization penalties, and a value of 0 indicates no penalty. Default is 0. fit_intercept : bool Whether to fit an intercept term in addition to the coefficients in b. If True, the estimates for `beta` will have `M + 1` dimensions, where the first dimension corresponds to the intercept. Default is True.\"\"\"\n <|body_0|>\n\n def fit(self, X, y, lr=0.01, tol=1e-07, max_iter=10000000.0):\n \"\"\"Fit the regression coefficients via gradient descent on the negative log likelihood. Parameters ---------- X : :py:class:`ndarray ` of shape `(N, M)` A dataset consisting of `N` examples, each of dimension `M`. y : :py:class:`ndarray ` of shape `(N,)` The binary targets for each of the `N` examples in `X`. lr : float The gradient descent learning rate. Default is 1e-7. max_iter : float The maximum number of iterations to run the gradient descent solver. Default is 1e7.\"\"\"\n <|body_1|>\n\n def _NLL(self, X, y, y_pred):\n \"\"\"Penalized negative log likelihood of the targets under the current model. .. math:: \\\\text{NLL} = -\\\\frac{1}{N} \\\\left[ \\\\left(\\\\sum_{i=0}^N y_i \\\\log(\\\\hat{y}_i) + (1-y_i) log(1-\\\\hat{y}_i) \\\\right) - \\\\frac{\\\\gamma}{2} ||\\\\mathbf{b}||_2 \\\\right]\"\"\"\n <|body_2|>\n\n def _NLL_grad(self, X, y, y_pred):\n \"\"\"Gradient of the penalized negative log likelihood wrt beta\"\"\"\n <|body_3|>\n\n def predict(self, X):\n \"\"\"Use the trained model to generate prediction probabilities on a new collection of data points. Parameters ---------- X : :py:class:`ndarray ` of shape `(Z, M)` A dataset consisting of `Z` new examples, each of dimension `M`. Returns ------- y_pred : :py:class:`ndarray ` of shape `(Z,)` The model prediction probabilities for the items in `X`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n err_msg = \"penalty must be 'l1' or 'l2', but got: {}\".format(penalty)\n assert penalty in ['l2', 'l1'], err_msg\n self.beta = None\n self.gamma = gamma\n self.penalty = penalty\n self.fit_intercept = fit_intercept\n<|end_body_0|>\n\n<|body_start_1|>\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n l_prev = np.inf\n self.beta = np.random.rand(X.shape[1])\n for _ in range(int(max_iter)):\n y_pred = sigmoid(np.dot(X, self.beta))\n loss = self._NLL(X, y, y_pred)\n if l_prev - loss < tol:\n return\n l_prev = loss\n self.beta -= lr * self._NLL_grad(X, y, y_pred)\n<|end_body_1|>\n\n<|body_start_2|>\n N, M = X.shape\n order = 2 if self.penalty == 'l2' else 1\n nll = -np.log(y_pred[y == 1]).sum() - np.log(1 - y_pred[y == 0]).sum()\n penalty = 0.5 * self.gamma * np.linalg.norm(self.beta, ord=order) ** 2\n return (penalty + nll) / N\n<|end_body_2|>\n\n<|body_start_3|>\n N, M = X.shape\n p = self.penalty\n beta = self.beta\n gamma = self.gamma\n l1norm = lambda x: np.linalg.norm(x, 1)\n d_penalty = gamma * beta if p == 'l2' else gamma * l1norm(beta) * np.sign(beta)\n return -(np.dot(y - y_pred, X) + d_penalty) / N\n<|end_body_3|>\n\n<|body_start_4|>\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n return sigmoid(np.dot(X, self.beta))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000060", "length_bytes": 4384, "license_type": "no_license", "methods": [{"docstring": "A simple logistic regression model fit via gradient descent on the penalized negative log likelihood. Parameters ---------- penalty : {'l1', 'l2'} The type of regularization penalty to apply on the coefficients `beta`. Default is 'l2'. gamma : float in [0, 1] The regularization weight. Larger values correspond to larger regularization penalties, and a value of 0 indicates no penalty. Default is 0. fit_intercept : bool Whether to fit an intercept term in addition to the coefficients in b. If True, the estimates for `beta` will have `M + 1` dimensions, where the first dimension corresponds to the intercept. Default is True.", "name": "__init__", "signature": "def __init__(self, penalty='l2', gamma=0, fit_intercept=True)"}, {"docstring": "Fit the regression coefficients via gradient descent on the negative log likelihood. Parameters ---------- X : :py:class:`ndarray ` of shape `(N, M)` A dataset consisting of `N` examples, each of dimension `M`. y : :py:class:`ndarray ` of shape `(N,)` The binary targets for each of the `N` examples in `X`. lr : float The gradient descent learning rate. Default is 1e-7. max_iter : float The maximum number of iterations to run the gradient descent solver. Default is 1e7.", "name": "fit", "signature": "def fit(self, X, y, lr=0.01, tol=1e-07, max_iter=10000000.0)"}, {"docstring": "Penalized negative log likelihood of the targets under the current model. .. math:: \\\\text{NLL} = -\\\\frac{1}{N} \\\\left[ \\\\left(\\\\sum_{i=0}^N y_i \\\\log(\\\\hat{y}_i) + (1-y_i) log(1-\\\\hat{y}_i) \\\\right) - \\\\frac{\\\\gamma}{2} ||\\\\mathbf{b}||_2 \\\\right]", "name": "_NLL", "signature": "def _NLL(self, X, y, y_pred)"}, {"docstring": "Gradient of the penalized negative log likelihood wrt beta", "name": "_NLL_grad", "signature": "def _NLL_grad(self, X, y, y_pred)"}, {"docstring": "Use the trained model to generate prediction probabilities on a new collection of data points. Parameters ---------- X : :py:class:`ndarray ` of shape `(Z, M)` A dataset consisting of `Z` new examples, each of dimension `M`. Returns ------- y_pred : :py:class:`ndarray ` of shape `(Z,)` The model prediction probabilities for the items in `X`.", "name": "predict", "signature": "def predict(self, X)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_053528", "prompt": "Implement the Python class `LogisticRegression` described below.\n\nClass description:\nImplement the LogisticRegression class.\n\nMethod signatures and docstrings:\n- def __init__(self, penalty='l2', gamma=0, fit_intercept=True): A simple logistic regression model fit via gradient descent on the penalized negative log likelihood. Parameters ---------- penalty : {'l1', 'l2'} The type of regularization penalty to apply on the coefficients `beta`. Default is 'l2'. gamma : float in [0, 1] The regularization weight. Larger values correspond to larger regularization penalties, and a value of 0 indicates no penalty. Default is 0. fit_intercept : bool Whether to fit an intercept term in addition to the coefficients in b. If True, the estimates for `beta` will have `M + 1` dimensions, where the first dimension corresponds to the intercept. Default is True.\n- def fit(self, X, y, lr=0.01, tol=1e-07, max_iter=10000000.0): Fit the regression coefficients via gradient descent on the negative log likelihood. Parameters ---------- X : :py:class:`ndarray ` of shape `(N, M)` A dataset consisting of `N` examples, each of dimension `M`. y : :py:class:`ndarray ` of shape `(N,)` The binary targets for each of the `N` examples in `X`. lr : float The gradient descent learning rate. Default is 1e-7. max_iter : float The maximum number of iterations to run the gradient descent solver. Default is 1e7.\n- def _NLL(self, X, y, y_pred): Penalized negative log likelihood of the targets under the current model. .. math:: \\\\text{NLL} = -\\\\frac{1}{N} \\\\left[ \\\\left(\\\\sum_{i=0}^N y_i \\\\log(\\\\hat{y}_i) + (1-y_i) log(1-\\\\hat{y}_i) \\\\right) - \\\\frac{\\\\gamma}{2} ||\\\\mathbf{b}||_2 \\\\right]\n- def _NLL_grad(self, X, y, y_pred): Gradient of the penalized negative log likelihood wrt beta\n- def predict(self, X): Use the trained model to generate prediction probabilities on a new collection of data points. Parameters ---------- X : :py:class:`ndarray ` of shape `(Z, M)` A dataset consisting of `Z` new examples, each of dimension `M`. Returns ------- y_pred : :py:class:`ndarray ` of shape `(Z,)` The model prediction probabilities for the items in `X`.", "prompted_full_text": "Implement the Python class `LogisticRegression` described below.\n\nClass description:\nImplement the LogisticRegression class.\n\nMethod signatures and docstrings:\n- def __init__(self, penalty='l2', gamma=0, fit_intercept=True): A simple logistic regression model fit via gradient descent on the penalized negative log likelihood. Parameters ---------- penalty : {'l1', 'l2'} The type of regularization penalty to apply on the coefficients `beta`. Default is 'l2'. gamma : float in [0, 1] The regularization weight. Larger values correspond to larger regularization penalties, and a value of 0 indicates no penalty. Default is 0. fit_intercept : bool Whether to fit an intercept term in addition to the coefficients in b. If True, the estimates for `beta` will have `M + 1` dimensions, where the first dimension corresponds to the intercept. Default is True.\n- def fit(self, X, y, lr=0.01, tol=1e-07, max_iter=10000000.0): Fit the regression coefficients via gradient descent on the negative log likelihood. Parameters ---------- X : :py:class:`ndarray ` of shape `(N, M)` A dataset consisting of `N` examples, each of dimension `M`. y : :py:class:`ndarray ` of shape `(N,)` The binary targets for each of the `N` examples in `X`. lr : float The gradient descent learning rate. Default is 1e-7. max_iter : float The maximum number of iterations to run the gradient descent solver. Default is 1e7.\n- def _NLL(self, X, y, y_pred): Penalized negative log likelihood of the targets under the current model. .. math:: \\\\text{NLL} = -\\\\frac{1}{N} \\\\left[ \\\\left(\\\\sum_{i=0}^N y_i \\\\log(\\\\hat{y}_i) + (1-y_i) log(1-\\\\hat{y}_i) \\\\right) - \\\\frac{\\\\gamma}{2} ||\\\\mathbf{b}||_2 \\\\right]\n- def _NLL_grad(self, X, y, y_pred): Gradient of the penalized negative log likelihood wrt beta\n- def predict(self, X): Use the trained model to generate prediction probabilities on a new collection of data points. Parameters ---------- X : :py:class:`ndarray ` of shape `(Z, M)` A dataset consisting of `Z` new examples, each of dimension `M`. Returns ------- y_pred : :py:class:`ndarray ` of shape `(Z,)` The model prediction probabilities for the items in `X`.\n\n<|skeleton|>\nclass LogisticRegression:\n\n def __init__(self, penalty='l2', gamma=0, fit_intercept=True):\n \"\"\"A simple logistic regression model fit via gradient descent on the penalized negative log likelihood. Parameters ---------- penalty : {'l1', 'l2'} The type of regularization penalty to apply on the coefficients `beta`. Default is 'l2'. gamma : float in [0, 1] The regularization weight. Larger values correspond to larger regularization penalties, and a value of 0 indicates no penalty. Default is 0. fit_intercept : bool Whether to fit an intercept term in addition to the coefficients in b. If True, the estimates for `beta` will have `M + 1` dimensions, where the first dimension corresponds to the intercept. Default is True.\"\"\"\n <|body_0|>\n\n def fit(self, X, y, lr=0.01, tol=1e-07, max_iter=10000000.0):\n \"\"\"Fit the regression coefficients via gradient descent on the negative log likelihood. Parameters ---------- X : :py:class:`ndarray ` of shape `(N, M)` A dataset consisting of `N` examples, each of dimension `M`. y : :py:class:`ndarray ` of shape `(N,)` The binary targets for each of the `N` examples in `X`. lr : float The gradient descent learning rate. Default is 1e-7. max_iter : float The maximum number of iterations to run the gradient descent solver. Default is 1e7.\"\"\"\n <|body_1|>\n\n def _NLL(self, X, y, y_pred):\n \"\"\"Penalized negative log likelihood of the targets under the current model. .. math:: \\\\text{NLL} = -\\\\frac{1}{N} \\\\left[ \\\\left(\\\\sum_{i=0}^N y_i \\\\log(\\\\hat{y}_i) + (1-y_i) log(1-\\\\hat{y}_i) \\\\right) - \\\\frac{\\\\gamma}{2} ||\\\\mathbf{b}||_2 \\\\right]\"\"\"\n <|body_2|>\n\n def _NLL_grad(self, X, y, y_pred):\n \"\"\"Gradient of the penalized negative log likelihood wrt beta\"\"\"\n <|body_3|>\n\n def predict(self, X):\n \"\"\"Use the trained model to generate prediction probabilities on a new collection of data points. Parameters ---------- X : :py:class:`ndarray ` of shape `(Z, M)` A dataset consisting of `Z` new examples, each of dimension `M`. Returns ------- y_pred : :py:class:`ndarray ` of shape `(Z,)` The model prediction probabilities for the items in `X`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n err_msg = \"penalty must be 'l1' or 'l2', but got: {}\".format(penalty)\n assert penalty in ['l2', 'l1'], err_msg\n self.beta = None\n self.gamma = gamma\n self.penalty = penalty\n self.fit_intercept = fit_intercept\n<|end_body_0|>\n\n<|body_start_1|>\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n l_prev = np.inf\n self.beta = np.random.rand(X.shape[1])\n for _ in range(int(max_iter)):\n y_pred = sigmoid(np.dot(X, self.beta))\n loss = self._NLL(X, y, y_pred)\n if l_prev - loss < tol:\n return\n l_prev = loss\n self.beta -= lr * self._NLL_grad(X, y, y_pred)\n<|end_body_1|>\n\n<|body_start_2|>\n N, M = X.shape\n order = 2 if self.penalty == 'l2' else 1\n nll = -np.log(y_pred[y == 1]).sum() - np.log(1 - y_pred[y == 0]).sum()\n penalty = 0.5 * self.gamma * np.linalg.norm(self.beta, ord=order) ** 2\n return (penalty + nll) / N\n<|end_body_2|>\n\n<|body_start_3|>\n N, M = X.shape\n p = self.penalty\n beta = self.beta\n gamma = self.gamma\n l1norm = lambda x: np.linalg.norm(x, 1)\n d_penalty = gamma * beta if p == 'l2' else gamma * l1norm(beta) * np.sign(beta)\n return -(np.dot(y - y_pred, X) + d_penalty) / N\n<|end_body_3|>\n\n<|body_start_4|>\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n return sigmoid(np.dot(X, self.beta))\n<|end_body_4|>\n", "revision_id": "5a4cb53ad7da95250bdfd2a82268c8e083ee0ecd", "skeleton": "<|skeleton|>\nclass LogisticRegression:\n\n def __init__(self, penalty='l2', gamma=0, fit_intercept=True):\n \"\"\"A simple logistic regression model fit via gradient descent on the penalized negative log likelihood. Parameters ---------- penalty : {'l1', 'l2'} The type of regularization penalty to apply on the coefficients `beta`. Default is 'l2'. gamma : float in [0, 1] The regularization weight. Larger values correspond to larger regularization penalties, and a value of 0 indicates no penalty. Default is 0. fit_intercept : bool Whether to fit an intercept term in addition to the coefficients in b. If True, the estimates for `beta` will have `M + 1` dimensions, where the first dimension corresponds to the intercept. Default is True.\"\"\"\n <|body_0|>\n\n def fit(self, X, y, lr=0.01, tol=1e-07, max_iter=10000000.0):\n \"\"\"Fit the regression coefficients via gradient descent on the negative log likelihood. Parameters ---------- X : :py:class:`ndarray ` of shape `(N, M)` A dataset consisting of `N` examples, each of dimension `M`. y : :py:class:`ndarray ` of shape `(N,)` The binary targets for each of the `N` examples in `X`. lr : float The gradient descent learning rate. Default is 1e-7. max_iter : float The maximum number of iterations to run the gradient descent solver. Default is 1e7.\"\"\"\n <|body_1|>\n\n def _NLL(self, X, y, y_pred):\n \"\"\"Penalized negative log likelihood of the targets under the current model. .. math:: \\\\text{NLL} = -\\\\frac{1}{N} \\\\left[ \\\\left(\\\\sum_{i=0}^N y_i \\\\log(\\\\hat{y}_i) + (1-y_i) log(1-\\\\hat{y}_i) \\\\right) - \\\\frac{\\\\gamma}{2} ||\\\\mathbf{b}||_2 \\\\right]\"\"\"\n <|body_2|>\n\n def _NLL_grad(self, X, y, y_pred):\n \"\"\"Gradient of the penalized negative log likelihood wrt beta\"\"\"\n <|body_3|>\n\n def predict(self, X):\n \"\"\"Use the trained model to generate prediction probabilities on a new collection of data points. Parameters ---------- X : :py:class:`ndarray ` of shape `(Z, M)` A dataset consisting of `Z` new examples, each of dimension `M`. Returns ------- y_pred : :py:class:`ndarray ` of shape `(Z,)` The model prediction probabilities for the items in `X`.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LogisticRegression:\n def __init__(self, penalty='l2', gamma=0, fit_intercept=True):\n \"\"\"A simple logistic regression model fit via gradient descent on the penalized negative log likelihood. Parameters ---------- penalty : {'l1', 'l2'} The type of regularization penalty to apply on the coefficients `beta`. Default is 'l2'. gamma : float in [0, 1] The regularization weight. Larger values correspond to larger regularization penalties, and a value of 0 indicates no penalty. Default is 0. fit_intercept : bool Whether to fit an intercept term in addition to the coefficients in b. If True, the estimates for `beta` will have `M + 1` dimensions, where the first dimension corresponds to the intercept. Default is True.\"\"\"\n err_msg = \"penalty must be 'l1' or 'l2', but got: {}\".format(penalty)\n assert penalty in ['l2', 'l1'], err_msg\n self.beta = None\n self.gamma = gamma\n self.penalty = penalty\n self.fit_intercept = fit_intercept\n\n def fit(self, X, y, lr=0.01, tol=1e-07, max_iter=10000000.0):\n \"\"\"Fit the regression coefficients via gradient descent on the negative log likelihood. Parameters ---------- X : :py:class:`ndarray ` of shape `(N, M)` A dataset consisting of `N` examples, each of dimension `M`. y : :py:class:`ndarray ` of shape `(N,)` The binary targets for each of the `N` examples in `X`. lr : float The gradient descent learning rate. Default is 1e-7. max_iter : float The maximum number of iterations to run the gradient descent solver. Default is 1e7.\"\"\"\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n l_prev = np.inf\n self.beta = np.random.rand(X.shape[1])\n for _ in range(int(max_iter)):\n y_pred = sigmoid(np.dot(X, self.beta))\n loss = self._NLL(X, y, y_pred)\n if l_prev - loss < tol:\n return\n l_prev = loss\n self.beta -= lr * self._NLL_grad(X, y, y_pred)\n\n def _NLL(self, X, y, y_pred):\n \"\"\"Penalized negative log likelihood of the targets under the current model. .. math:: \\\\text{NLL} = -\\\\frac{1}{N} \\\\left[ \\\\left(\\\\sum_{i=0}^N y_i \\\\log(\\\\hat{y}_i) + (1-y_i) log(1-\\\\hat{y}_i) \\\\right) - \\\\frac{\\\\gamma}{2} ||\\\\mathbf{b}||_2 \\\\right]\"\"\"\n N, M = X.shape\n order = 2 if self.penalty == 'l2' else 1\n nll = -np.log(y_pred[y == 1]).sum() - np.log(1 - y_pred[y == 0]).sum()\n penalty = 0.5 * self.gamma * np.linalg.norm(self.beta, ord=order) ** 2\n return (penalty + nll) / N\n\n def _NLL_grad(self, X, y, y_pred):\n \"\"\"Gradient of the penalized negative log likelihood wrt beta\"\"\"\n N, M = X.shape\n p = self.penalty\n beta = self.beta\n gamma = self.gamma\n l1norm = lambda x: np.linalg.norm(x, 1)\n d_penalty = gamma * beta if p == 'l2' else gamma * l1norm(beta) * np.sign(beta)\n return -(np.dot(y - y_pred, X) + d_penalty) / N\n\n def predict(self, X):\n \"\"\"Use the trained model to generate prediction probabilities on a new collection of data points. Parameters ---------- X : :py:class:`ndarray ` of shape `(Z, M)` A dataset consisting of `Z` new examples, each of dimension `M`. Returns ------- y_pred : :py:class:`ndarray ` of shape `(Z,)` The model prediction probabilities for the items in `X`.\"\"\"\n if self.fit_intercept:\n X = np.c_[np.ones(X.shape[0]), X]\n return sigmoid(np.dot(X, self.beta))\n", "source": "the_stack_v2_python_sparse", "source_path": "numpy-ml/Logistic_Regression.py", "source_repo": "yuki9965/my-algorithm", "split": "test", "star_events_count": 0} {"blob_id": "f0710601492a58bc94877689a7bebc6077ba39af", "bodies": ["self.total_area = float(total_area)\nself.imp_area = float(imp_area)\nself.bmp_area = float(bmp_area)", "Rv = 0.05 + 0.9 * (self.imp_area / self.total_area)\ndrainage_conversion = Rv * self.total_area * volume_conversion\nbmp_conversion = self.bmp_area * volume_conversion\nrunoff_volume = (drainage_conversion * annual_factor + bmp_conversion) * storm_depth\nreturn runoff_volume"], "bodies_text": "<|body_start_0|>\n self.total_area = float(total_area)\n self.imp_area = float(imp_area)\n self.bmp_area = float(bmp_area)\n<|end_body_0|>\n\n<|body_start_1|>\n Rv = 0.05 + 0.9 * (self.imp_area / self.total_area)\n drainage_conversion = Rv * self.total_area * volume_conversion\n bmp_conversion = self.bmp_area * volume_conversion\n runoff_volume = (drainage_conversion * annual_factor + bmp_conversion) * storm_depth\n return runoff_volume\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DrainageArea", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DrainageArea:\n\n def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0):\n \"\"\"A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the \"total\" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself.\"\"\"\n <|body_0|>\n\n def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0):\n \"\"\"Estimate runoff volume via Bob Pitt's Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.total_area = float(total_area)\n self.imp_area = float(imp_area)\n self.bmp_area = float(bmp_area)\n<|end_body_0|>\n\n<|body_start_1|>\n Rv = 0.05 + 0.9 * (self.imp_area / self.total_area)\n drainage_conversion = Rv * self.total_area * volume_conversion\n bmp_conversion = self.bmp_area * volume_conversion\n runoff_volume = (drainage_conversion * annual_factor + bmp_conversion) * storm_depth\n return runoff_volume\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000061", "length_bytes": 37198, "license_type": "permissive", "methods": [{"docstring": "A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the \"total\" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself.", "name": "__init__", "signature": "def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0)"}, {"docstring": "Estimate runoff volume via Bob Pitt's Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area.", "name": "simple_method", "signature": "def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040538", "prompt": "Implement the Python class `DrainageArea` described below.\n\nClass description:\nImplement the DrainageArea class.\n\nMethod signatures and docstrings:\n- def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0): A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the \"total\" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself.\n- def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0): Estimate runoff volume via Bob Pitt's Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area.", "prompted_full_text": "Implement the Python class `DrainageArea` described below.\n\nClass description:\nImplement the DrainageArea class.\n\nMethod signatures and docstrings:\n- def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0): A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the \"total\" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself.\n- def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0): Estimate runoff volume via Bob Pitt's Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area.\n\n<|skeleton|>\nclass DrainageArea:\n\n def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0):\n \"\"\"A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the \"total\" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself.\"\"\"\n <|body_0|>\n\n def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0):\n \"\"\"Estimate runoff volume via Bob Pitt's Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.total_area = float(total_area)\n self.imp_area = float(imp_area)\n self.bmp_area = float(bmp_area)\n<|end_body_0|>\n\n<|body_start_1|>\n Rv = 0.05 + 0.9 * (self.imp_area / self.total_area)\n drainage_conversion = Rv * self.total_area * volume_conversion\n bmp_conversion = self.bmp_area * volume_conversion\n runoff_volume = (drainage_conversion * annual_factor + bmp_conversion) * storm_depth\n return runoff_volume\n<|end_body_1|>\n", "revision_id": "a88a88b771bea57cfcc7520b186de24ec96ddab1", "skeleton": "<|skeleton|>\nclass DrainageArea:\n\n def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0):\n \"\"\"A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the \"total\" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself.\"\"\"\n <|body_0|>\n\n def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0):\n \"\"\"Estimate runoff volume via Bob Pitt's Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DrainageArea:\n def __init__(self, total_area=1.0, imp_area=1.0, bmp_area=0.0):\n \"\"\"A simple object representing the drainage area of a BMP. Units are not enforced, so keep them consistent yourself. The calculations available assume that the area of the BMP and the \"total\" area are mutually exclusive. In other words, the watershed outlet is at the BMP inlet. Parameters ---------- total_area : float, optional (default = 1.0) The total geometric area of the BMP's catchment imp_area : float, optional (default = 1.0) The impervious area of the BMP's catchment bmp_area : float, optional (default = 0.0) The geometric area of the BMP itself.\"\"\"\n self.total_area = float(total_area)\n self.imp_area = float(imp_area)\n self.bmp_area = float(bmp_area)\n\n def simple_method(self, storm_depth, volume_conversion=1.0, annual_factor=1.0):\n \"\"\"Estimate runoff volume via Bob Pitt's Simple Method. Parameters ---------- storm_depth : float Depth of the storm. volume_conversion : float, optional (default = 1.0) Conversion factor to go from [area units] * [depth units] to the desired [volume units]. If [area] = m^2, [depth] = mm, and [volume] = L, then `volume_conversion` = 1. annual_factor : float, optional (default = 1.0) The Simple Method's annual correction factor to account for small storms that do not produce runoff. Returns ------- runoff_volume : float The volume of water entering the BMP immediately downstream of the drainage area.\"\"\"\n Rv = 0.05 + 0.9 * (self.imp_area / self.total_area)\n drainage_conversion = Rv * self.total_area * volume_conversion\n bmp_conversion = self.bmp_area * volume_conversion\n runoff_volume = (drainage_conversion * annual_factor + bmp_conversion) * storm_depth\n return runoff_volume\n", "source": "the_stack_v2_python_sparse", "source_path": "wqio/hydro.py", "source_repo": "phobson/wqio", "split": "test", "star_events_count": 0} {"blob_id": "e17ad0cec68331ea266bf4bcf4a1d908c1ed0049", "bodies": ["sh_commands = [command]\nif ';' in command:\n sh_commands = [cmd.strip() for cmd in command.split(';')]\nelif '&&' in command:\n sh_commands = [cmd.strip() for cmd in command.split('&&')]\nwith open(os.path.join('dashboard', 'jobs_framework', 'commands.acl'), 'r') as acl_values:\n allowed_base_commands = acl_values.read().splitlines()\n not_allowed = [cmd for cmd in sh_commands if cmd.split()[0] not in allowed_base_commands]\n if not_allowed and len(not_allowed) >= 1:\n raise Exception('Invalid command: %s' % not_allowed[0])\nreturn command", "task_subject = 'Generate POT File'\ntask_log = OrderedDict()\npot_file_path = ''\nif kwargs.get('cmd'):\n command = self._verify_command(kwargs['cmd'])\n po_dir = self.find_dir('po', input['src_tar_dir'])\n if not po_dir:\n po_dir = self.find_dir('locale', input['src_tar_dir']) or ''\n pot_file = os.path.join(po_dir, '%s.pot' % kwargs.get('domain', input['package']))\n if os.path.exists(pot_file) and kwargs.get('overwrite'):\n os.unlink(pot_file)\n try:\n os.chdir(input['src_tar_dir'])\n generate_pot = Popen(command, stdout=PIPE, shell=True)\n output, error = generate_pot.communicate()\n except Exception as e:\n os.chdir(input['base_dir'])\n task_log.update(self._log_task(input['log_f'], task_subject, 'POT file generation failed %s' % str(e)))\n else:\n os.chdir(input['base_dir'])\n if os.path.isfile(pot_file):\n pot_file_path = pot_file\n task_log.update(self._log_task(input['log_f'], task_subject, output.decode('utf-8'), text_prefix='POT file generated successfully. [ %s.pot ]' % kwargs.get('domain', input['package'])))\n else:\n error_response = 'POT file generation failed with command: %s' % command\n task_log.update(self._log_task(input['log_f'], task_subject, error_response))\n raise Exception(error_response)\nelse:\n task_log.update(self._log_task(input['log_f'], task_subject, 'Command to generate POT missing.'))\nreturn ({'src_pot_file': pot_file_path, 'i18n_domain': kwargs.get('domain', input['package'])}, {task_subject: task_log})"], "bodies_text": "<|body_start_0|>\n sh_commands = [command]\n if ';' in command:\n sh_commands = [cmd.strip() for cmd in command.split(';')]\n elif '&&' in command:\n sh_commands = [cmd.strip() for cmd in command.split('&&')]\n with open(os.path.join('dashboard', 'jobs_framework', 'commands.acl'), 'r') as acl_values:\n allowed_base_commands = acl_values.read().splitlines()\n not_allowed = [cmd for cmd in sh_commands if cmd.split()[0] not in allowed_base_commands]\n if not_allowed and len(not_allowed) >= 1:\n raise Exception('Invalid command: %s' % not_allowed[0])\n return command\n<|end_body_0|>\n\n<|body_start_1|>\n task_subject = 'Generate POT File'\n task_log = OrderedDict()\n pot_file_path = ''\n if kwargs.get('cmd'):\n command = self._verify_command(kwargs['cmd'])\n po_dir = self.find_dir('po', input['src_tar_dir'])\n if not po_dir:\n po_dir = self.find_dir('locale', input['src_tar_dir']) or ''\n pot_file = os.path.join(po_dir, '%s.pot' % kwargs.get('domain', input['package']))\n if os.path.exists(pot_file) and kwargs.get('overwrite'):\n os.unlink(pot_file)\n try:\n os.chdir(input['src_tar_dir'])\n generate_pot = Popen(command, stdout=PIPE, shell=True)\n output, error = generate_pot.communicate()\n except Exception as e:\n os.chdir(input['base_dir'])\n task_log.update(self._log_task(input['log_f'], task_subject, 'POT file generation failed %s' % str(e)))\n else:\n os.chdir(input['base_dir'])\n if os.path.isfile(pot_file):\n pot_file_path = pot_file\n task_log.update(self._log_task(input['log_f'], task_subject, output.decode('utf-8'), text_prefix='POT file generated successfully. [ %s.pot ]' % kwargs.get('domain', input['package'])))\n else:\n error_response = 'POT file generation failed with command: %s' % command\n task_log.update(self._log_task(input['log_f'], task_subject, error_response))\n raise Exception(error_response)\n else:\n task_log.update(self._log_task(input['log_f'], task_subject, 'Command to generate POT missing.'))\n return ({'src_pot_file': pot_file_path, 'i18n_domain': kwargs.get('domain', input['package'])}, {task_subject: task_log})\n<|end_body_1|>\n", "class_docstring": "Handles all operations for GENERATE Command", "class_name": "Generate", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Generate:\n \"\"\"Handles all operations for GENERATE Command\"\"\"\n\n def _verify_command(self, command):\n \"\"\"verify given command against commands.acl file :param command: str :return: filtered command\"\"\"\n <|body_0|>\n\n def pot_file(self, input, kwargs):\n \"\"\"Generates POT file as per the given command\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sh_commands = [command]\n if ';' in command:\n sh_commands = [cmd.strip() for cmd in command.split(';')]\n elif '&&' in command:\n sh_commands = [cmd.strip() for cmd in command.split('&&')]\n with open(os.path.join('dashboard', 'jobs_framework', 'commands.acl'), 'r') as acl_values:\n allowed_base_commands = acl_values.read().splitlines()\n not_allowed = [cmd for cmd in sh_commands if cmd.split()[0] not in allowed_base_commands]\n if not_allowed and len(not_allowed) >= 1:\n raise Exception('Invalid command: %s' % not_allowed[0])\n return command\n<|end_body_0|>\n\n<|body_start_1|>\n task_subject = 'Generate POT File'\n task_log = OrderedDict()\n pot_file_path = ''\n if kwargs.get('cmd'):\n command = self._verify_command(kwargs['cmd'])\n po_dir = self.find_dir('po', input['src_tar_dir'])\n if not po_dir:\n po_dir = self.find_dir('locale', input['src_tar_dir']) or ''\n pot_file = os.path.join(po_dir, '%s.pot' % kwargs.get('domain', input['package']))\n if os.path.exists(pot_file) and kwargs.get('overwrite'):\n os.unlink(pot_file)\n try:\n os.chdir(input['src_tar_dir'])\n generate_pot = Popen(command, stdout=PIPE, shell=True)\n output, error = generate_pot.communicate()\n except Exception as e:\n os.chdir(input['base_dir'])\n task_log.update(self._log_task(input['log_f'], task_subject, 'POT file generation failed %s' % str(e)))\n else:\n os.chdir(input['base_dir'])\n if os.path.isfile(pot_file):\n pot_file_path = pot_file\n task_log.update(self._log_task(input['log_f'], task_subject, output.decode('utf-8'), text_prefix='POT file generated successfully. [ %s.pot ]' % kwargs.get('domain', input['package'])))\n else:\n error_response = 'POT file generation failed with command: %s' % command\n task_log.update(self._log_task(input['log_f'], task_subject, error_response))\n raise Exception(error_response)\n else:\n task_log.update(self._log_task(input['log_f'], task_subject, 'Command to generate POT missing.'))\n return ({'src_pot_file': pot_file_path, 'i18n_domain': kwargs.get('domain', input['package'])}, {task_subject: task_log})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000062", "length_bytes": 3932, "license_type": "permissive", "methods": [{"docstring": "verify given command against commands.acl file :param command: str :return: filtered command", "name": "_verify_command", "signature": "def _verify_command(self, command)"}, {"docstring": "Generates POT file as per the given command", "name": "pot_file", "signature": "def pot_file(self, input, kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `Generate` described below.\n\nClass description:\nHandles all operations for GENERATE Command\n\nMethod signatures and docstrings:\n- def _verify_command(self, command): verify given command against commands.acl file :param command: str :return: filtered command\n- def pot_file(self, input, kwargs): Generates POT file as per the given command", "prompted_full_text": "Implement the Python class `Generate` described below.\n\nClass description:\nHandles all operations for GENERATE Command\n\nMethod signatures and docstrings:\n- def _verify_command(self, command): verify given command against commands.acl file :param command: str :return: filtered command\n- def pot_file(self, input, kwargs): Generates POT file as per the given command\n\n<|skeleton|>\nclass Generate:\n \"\"\"Handles all operations for GENERATE Command\"\"\"\n\n def _verify_command(self, command):\n \"\"\"verify given command against commands.acl file :param command: str :return: filtered command\"\"\"\n <|body_0|>\n\n def pot_file(self, input, kwargs):\n \"\"\"Generates POT file as per the given command\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sh_commands = [command]\n if ';' in command:\n sh_commands = [cmd.strip() for cmd in command.split(';')]\n elif '&&' in command:\n sh_commands = [cmd.strip() for cmd in command.split('&&')]\n with open(os.path.join('dashboard', 'jobs_framework', 'commands.acl'), 'r') as acl_values:\n allowed_base_commands = acl_values.read().splitlines()\n not_allowed = [cmd for cmd in sh_commands if cmd.split()[0] not in allowed_base_commands]\n if not_allowed and len(not_allowed) >= 1:\n raise Exception('Invalid command: %s' % not_allowed[0])\n return command\n<|end_body_0|>\n\n<|body_start_1|>\n task_subject = 'Generate POT File'\n task_log = OrderedDict()\n pot_file_path = ''\n if kwargs.get('cmd'):\n command = self._verify_command(kwargs['cmd'])\n po_dir = self.find_dir('po', input['src_tar_dir'])\n if not po_dir:\n po_dir = self.find_dir('locale', input['src_tar_dir']) or ''\n pot_file = os.path.join(po_dir, '%s.pot' % kwargs.get('domain', input['package']))\n if os.path.exists(pot_file) and kwargs.get('overwrite'):\n os.unlink(pot_file)\n try:\n os.chdir(input['src_tar_dir'])\n generate_pot = Popen(command, stdout=PIPE, shell=True)\n output, error = generate_pot.communicate()\n except Exception as e:\n os.chdir(input['base_dir'])\n task_log.update(self._log_task(input['log_f'], task_subject, 'POT file generation failed %s' % str(e)))\n else:\n os.chdir(input['base_dir'])\n if os.path.isfile(pot_file):\n pot_file_path = pot_file\n task_log.update(self._log_task(input['log_f'], task_subject, output.decode('utf-8'), text_prefix='POT file generated successfully. [ %s.pot ]' % kwargs.get('domain', input['package'])))\n else:\n error_response = 'POT file generation failed with command: %s' % command\n task_log.update(self._log_task(input['log_f'], task_subject, error_response))\n raise Exception(error_response)\n else:\n task_log.update(self._log_task(input['log_f'], task_subject, 'Command to generate POT missing.'))\n return ({'src_pot_file': pot_file_path, 'i18n_domain': kwargs.get('domain', input['package'])}, {task_subject: task_log})\n<|end_body_1|>\n", "revision_id": "1a1da6f3f92489429e20fcbbf33d01893975664b", "skeleton": "<|skeleton|>\nclass Generate:\n \"\"\"Handles all operations for GENERATE Command\"\"\"\n\n def _verify_command(self, command):\n \"\"\"verify given command against commands.acl file :param command: str :return: filtered command\"\"\"\n <|body_0|>\n\n def pot_file(self, input, kwargs):\n \"\"\"Generates POT file as per the given command\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Generate:\n \"\"\"Handles all operations for GENERATE Command\"\"\"\n\n def _verify_command(self, command):\n \"\"\"verify given command against commands.acl file :param command: str :return: filtered command\"\"\"\n sh_commands = [command]\n if ';' in command:\n sh_commands = [cmd.strip() for cmd in command.split(';')]\n elif '&&' in command:\n sh_commands = [cmd.strip() for cmd in command.split('&&')]\n with open(os.path.join('dashboard', 'jobs_framework', 'commands.acl'), 'r') as acl_values:\n allowed_base_commands = acl_values.read().splitlines()\n not_allowed = [cmd for cmd in sh_commands if cmd.split()[0] not in allowed_base_commands]\n if not_allowed and len(not_allowed) >= 1:\n raise Exception('Invalid command: %s' % not_allowed[0])\n return command\n\n def pot_file(self, input, kwargs):\n \"\"\"Generates POT file as per the given command\"\"\"\n task_subject = 'Generate POT File'\n task_log = OrderedDict()\n pot_file_path = ''\n if kwargs.get('cmd'):\n command = self._verify_command(kwargs['cmd'])\n po_dir = self.find_dir('po', input['src_tar_dir'])\n if not po_dir:\n po_dir = self.find_dir('locale', input['src_tar_dir']) or ''\n pot_file = os.path.join(po_dir, '%s.pot' % kwargs.get('domain', input['package']))\n if os.path.exists(pot_file) and kwargs.get('overwrite'):\n os.unlink(pot_file)\n try:\n os.chdir(input['src_tar_dir'])\n generate_pot = Popen(command, stdout=PIPE, shell=True)\n output, error = generate_pot.communicate()\n except Exception as e:\n os.chdir(input['base_dir'])\n task_log.update(self._log_task(input['log_f'], task_subject, 'POT file generation failed %s' % str(e)))\n else:\n os.chdir(input['base_dir'])\n if os.path.isfile(pot_file):\n pot_file_path = pot_file\n task_log.update(self._log_task(input['log_f'], task_subject, output.decode('utf-8'), text_prefix='POT file generated successfully. [ %s.pot ]' % kwargs.get('domain', input['package'])))\n else:\n error_response = 'POT file generation failed with command: %s' % command\n task_log.update(self._log_task(input['log_f'], task_subject, error_response))\n raise Exception(error_response)\n else:\n task_log.update(self._log_task(input['log_f'], task_subject, 'Command to generate POT missing.'))\n return ({'src_pot_file': pot_file_path, 'i18n_domain': kwargs.get('domain', input['package'])}, {task_subject: task_log})\n", "source": "the_stack_v2_python_sparse", "source_path": "dashboard/jobs_framework/cmds/generate.py", "source_repo": "transtats/transtats", "split": "test", "star_events_count": 39} {"blob_id": "c166be213f75d32e4cb236b34f1927c601ff7bbc", "bodies": ["with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1')\nretrieve_image_data.s.assert_called_with(self.image.pk, if_not_retrieved_since=None)\nretrieve_image_data.s.return_value.delay.assert_called_with()", "with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', retrieved=timezone.now())\nself.assertFalse(retrieve_image_data.s.called)", "with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', width=32, height=32)\nself.assertFalse(retrieve_image_data.s.called)"], "bodies_text": "<|body_start_0|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1')\n retrieve_image_data.s.assert_called_with(self.image.pk, if_not_retrieved_since=None)\n retrieve_image_data.s.return_value.delay.assert_called_with()\n<|end_body_0|>\n\n<|body_start_1|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', retrieved=timezone.now())\n self.assertFalse(retrieve_image_data.s.called)\n<|end_body_1|>\n\n<|body_start_2|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', width=32, height=32)\n self.assertFalse(retrieve_image_data.s.called)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TestSignalHandler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestSignalHandler:\n\n def test_queues_retrieve_when_image_created(self):\n \"\"\"Test signal handler queues retrieve when image created.\"\"\"\n <|body_0|>\n\n def test_doesnt_queue_retrieve_when_retrieved_is_set(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n <|body_1|>\n\n def test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1')\n retrieve_image_data.s.assert_called_with(self.image.pk, if_not_retrieved_since=None)\n retrieve_image_data.s.return_value.delay.assert_called_with()\n<|end_body_0|>\n\n<|body_start_1|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', retrieved=timezone.now())\n self.assertFalse(retrieve_image_data.s.called)\n<|end_body_1|>\n\n<|body_start_2|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', width=32, height=32)\n self.assertFalse(retrieve_image_data.s.called)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000063", "length_bytes": 37991, "license_type": "no_license", "methods": [{"docstring": "Test signal handler queues retrieve when image created.", "name": "test_queues_retrieve_when_image_created", "signature": "def test_queues_retrieve_when_image_created(self)"}, {"docstring": "Test signal handler doesnt queue retrieve when retrieved is set.", "name": "test_doesnt_queue_retrieve_when_retrieved_is_set", "signature": "def test_doesnt_queue_retrieve_when_retrieved_is_set(self)"}, {"docstring": "Test signal handler doesnt queue retrieve when retrieved is set.", "name": "test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small", "signature": "def test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_039363", "prompt": "Implement the Python class `TestSignalHandler` described below.\n\nClass description:\nImplement the TestSignalHandler class.\n\nMethod signatures and docstrings:\n- def test_queues_retrieve_when_image_created(self): Test signal handler queues retrieve when image created.\n- def test_doesnt_queue_retrieve_when_retrieved_is_set(self): Test signal handler doesnt queue retrieve when retrieved is set.\n- def test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small(self): Test signal handler doesnt queue retrieve when retrieved is set.", "prompted_full_text": "Implement the Python class `TestSignalHandler` described below.\n\nClass description:\nImplement the TestSignalHandler class.\n\nMethod signatures and docstrings:\n- def test_queues_retrieve_when_image_created(self): Test signal handler queues retrieve when image created.\n- def test_doesnt_queue_retrieve_when_retrieved_is_set(self): Test signal handler doesnt queue retrieve when retrieved is set.\n- def test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small(self): Test signal handler doesnt queue retrieve when retrieved is set.\n\n<|skeleton|>\nclass TestSignalHandler:\n\n def test_queues_retrieve_when_image_created(self):\n \"\"\"Test signal handler queues retrieve when image created.\"\"\"\n <|body_0|>\n\n def test_doesnt_queue_retrieve_when_retrieved_is_set(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n <|body_1|>\n\n def test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1')\n retrieve_image_data.s.assert_called_with(self.image.pk, if_not_retrieved_since=None)\n retrieve_image_data.s.return_value.delay.assert_called_with()\n<|end_body_0|>\n\n<|body_start_1|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', retrieved=timezone.now())\n self.assertFalse(retrieve_image_data.s.called)\n<|end_body_1|>\n\n<|body_start_2|>\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', width=32, height=32)\n self.assertFalse(retrieve_image_data.s.called)\n<|end_body_2|>\n", "revision_id": "0075ea457f764cbb67acecb584e927bf58d2e7a8", "skeleton": "<|skeleton|>\nclass TestSignalHandler:\n\n def test_queues_retrieve_when_image_created(self):\n \"\"\"Test signal handler queues retrieve when image created.\"\"\"\n <|body_0|>\n\n def test_doesnt_queue_retrieve_when_retrieved_is_set(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n <|body_1|>\n\n def test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestSignalHandler:\n def test_queues_retrieve_when_image_created(self):\n \"\"\"Test signal handler queues retrieve when image created.\"\"\"\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1')\n retrieve_image_data.s.assert_called_with(self.image.pk, if_not_retrieved_since=None)\n retrieve_image_data.s.return_value.delay.assert_called_with()\n\n def test_doesnt_queue_retrieve_when_retrieved_is_set(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', retrieved=timezone.now())\n self.assertFalse(retrieve_image_data.s.called)\n\n def test_doesnt_queue_retrieve_when_size_knwn_to_be_too_small(self):\n \"\"\"Test signal handler doesnt queue retrieve when retrieved is set.\"\"\"\n with patch.object(tasks, 'retrieve_image_data') as retrieve_image_data:\n self.image = Image.objects.create(data_url='https://example.com/1', width=32, height=32)\n self.assertFalse(retrieve_image_data.s.called)\n", "source": "the_stack_v2_python_sparse", "source_path": "linotak/images/tests.py", "source_repo": "pdc/linotak", "split": "test", "star_events_count": 0} {"blob_id": "5f48f665c7656c9250960f1e59a7158d85b2149a", "bodies": ["number = int(request.form['number'])\nstudent = StudentModel.objects(number=number).first()\npw = request.form['pw']\nif not student:\n return Response('', 204)\npw = hexlify(pbkdf2_hmac(hash_name='sha256', password=pw.encode(), salt=current_app.secret_key.encode(), iterations=100000)).decode('utf-8')\nstudent.update(pw=pw)\nreturn Response('', 201)", "id = request.form['id']\nadmin = AdminModel.objects(id=id).first()\nif not admin:\n return Response('', 204)\nadmin.delete()\nreturn Response('', 200)"], "bodies_text": "<|body_start_0|>\n number = int(request.form['number'])\n student = StudentModel.objects(number=number).first()\n pw = request.form['pw']\n if not student:\n return Response('', 204)\n pw = hexlify(pbkdf2_hmac(hash_name='sha256', password=pw.encode(), salt=current_app.secret_key.encode(), iterations=100000)).decode('utf-8')\n student.update(pw=pw)\n return Response('', 201)\n<|end_body_0|>\n\n<|body_start_1|>\n id = request.form['id']\n admin = AdminModel.objects(id=id).first()\n if not admin:\n return Response('', 204)\n admin.delete()\n return Response('', 200)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AccountControl", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AccountControl:\n\n def post(self):\n \"\"\"학생 계정 비밀번호 변경\"\"\"\n <|body_0|>\n\n def delete(self):\n \"\"\"관리자 계정 삭제\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n number = int(request.form['number'])\n student = StudentModel.objects(number=number).first()\n pw = request.form['pw']\n if not student:\n return Response('', 204)\n pw = hexlify(pbkdf2_hmac(hash_name='sha256', password=pw.encode(), salt=current_app.secret_key.encode(), iterations=100000)).decode('utf-8')\n student.update(pw=pw)\n return Response('', 201)\n<|end_body_0|>\n\n<|body_start_1|>\n id = request.form['id']\n admin = AdminModel.objects(id=id).first()\n if not admin:\n return Response('', 204)\n admin.delete()\n return Response('', 200)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000064", "length_bytes": 1726, "license_type": "permissive", "methods": [{"docstring": "학생 계정 비밀번호 변경", "name": "post", "signature": "def post(self)"}, {"docstring": "관리자 계정 삭제", "name": "delete", "signature": "def delete(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_016334", "prompt": "Implement the Python class `AccountControl` described below.\n\nClass description:\nImplement the AccountControl class.\n\nMethod signatures and docstrings:\n- def post(self): 학생 계정 비밀번호 변경\n- def delete(self): 관리자 계정 삭제", "prompted_full_text": "Implement the Python class `AccountControl` described below.\n\nClass description:\nImplement the AccountControl class.\n\nMethod signatures and docstrings:\n- def post(self): 학생 계정 비밀번호 변경\n- def delete(self): 관리자 계정 삭제\n\n<|skeleton|>\nclass AccountControl:\n\n def post(self):\n \"\"\"학생 계정 비밀번호 변경\"\"\"\n <|body_0|>\n\n def delete(self):\n \"\"\"관리자 계정 삭제\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n number = int(request.form['number'])\n student = StudentModel.objects(number=number).first()\n pw = request.form['pw']\n if not student:\n return Response('', 204)\n pw = hexlify(pbkdf2_hmac(hash_name='sha256', password=pw.encode(), salt=current_app.secret_key.encode(), iterations=100000)).decode('utf-8')\n student.update(pw=pw)\n return Response('', 201)\n<|end_body_0|>\n\n<|body_start_1|>\n id = request.form['id']\n admin = AdminModel.objects(id=id).first()\n if not admin:\n return Response('', 204)\n admin.delete()\n return Response('', 200)\n<|end_body_1|>\n", "revision_id": "de585fe904a2bf15f9fc74219eae176151a0f8ca", "skeleton": "<|skeleton|>\nclass AccountControl:\n\n def post(self):\n \"\"\"학생 계정 비밀번호 변경\"\"\"\n <|body_0|>\n\n def delete(self):\n \"\"\"관리자 계정 삭제\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AccountControl:\n def post(self):\n \"\"\"학생 계정 비밀번호 변경\"\"\"\n number = int(request.form['number'])\n student = StudentModel.objects(number=number).first()\n pw = request.form['pw']\n if not student:\n return Response('', 204)\n pw = hexlify(pbkdf2_hmac(hash_name='sha256', password=pw.encode(), salt=current_app.secret_key.encode(), iterations=100000)).decode('utf-8')\n student.update(pw=pw)\n return Response('', 201)\n\n def delete(self):\n \"\"\"관리자 계정 삭제\"\"\"\n id = request.form['id']\n admin = AdminModel.objects(id=id).first()\n if not admin:\n return Response('', 204)\n admin.delete()\n return Response('', 200)\n", "source": "the_stack_v2_python_sparse", "source_path": "Server/app/views/v1/admin/account/account_control.py", "source_repo": "miraedbswo/DMS-Backend", "split": "test", "star_events_count": 2} {"blob_id": "846278e061d7f654d3371f1c572ad3e915df55d4", "bodies": ["input_json = request.data['APIParams']\noutput_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'Payload'], [request.data['AvailabilityDetails'], request.data['AuthenticationDetails'], None]))\nfetch_all_state = self.fetch_states(input_json)\npayload_details = {'states_details': fetch_all_state}\noutput_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Successfully retrieved States.', payload_details]))\nreturn Response(output_json)", "input_json, output_json = (request, {})\ntry:\n sql = sql_exec('fetch_states_by_countryid', [input_json['country_id']])\n return sql\nexcept Exception as ex:\n output_json = dict(zip(['Status', 'Message'], ['Failure', f'Internal Database Error, operation failed: {ex}']))\n return Response(output_json)\nreturn"], "bodies_text": "<|body_start_0|>\n input_json = request.data['APIParams']\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'Payload'], [request.data['AvailabilityDetails'], request.data['AuthenticationDetails'], None]))\n fetch_all_state = self.fetch_states(input_json)\n payload_details = {'states_details': fetch_all_state}\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Successfully retrieved States.', payload_details]))\n return Response(output_json)\n<|end_body_0|>\n\n<|body_start_1|>\n input_json, output_json = (request, {})\n try:\n sql = sql_exec('fetch_states_by_countryid', [input_json['country_id']])\n return sql\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message'], ['Failure', f'Internal Database Error, operation failed: {ex}']))\n return Response(output_json)\n return\n<|end_body_1|>\n", "class_docstring": "This API cover for fetch all states.", "class_name": "GetAllStatesByCountryAPI", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetAllStatesByCountryAPI:\n \"\"\"This API cover for fetch all states.\"\"\"\n\n def post(self, request):\n \"\"\"This API cover for fetch all states.\"\"\"\n <|body_0|>\n\n def fetch_states(self, request):\n \"\"\"Function to fetch states into database.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_json = request.data['APIParams']\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'Payload'], [request.data['AvailabilityDetails'], request.data['AuthenticationDetails'], None]))\n fetch_all_state = self.fetch_states(input_json)\n payload_details = {'states_details': fetch_all_state}\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Successfully retrieved States.', payload_details]))\n return Response(output_json)\n<|end_body_0|>\n\n<|body_start_1|>\n input_json, output_json = (request, {})\n try:\n sql = sql_exec('fetch_states_by_countryid', [input_json['country_id']])\n return sql\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message'], ['Failure', f'Internal Database Error, operation failed: {ex}']))\n return Response(output_json)\n return\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000065", "length_bytes": 1574, "license_type": "no_license", "methods": [{"docstring": "This API cover for fetch all states.", "name": "post", "signature": "def post(self, request)"}, {"docstring": "Function to fetch states into database.", "name": "fetch_states", "signature": "def fetch_states(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041145", "prompt": "Implement the Python class `GetAllStatesByCountryAPI` described below.\n\nClass description:\nThis API cover for fetch all states.\n\nMethod signatures and docstrings:\n- def post(self, request): This API cover for fetch all states.\n- def fetch_states(self, request): Function to fetch states into database.", "prompted_full_text": "Implement the Python class `GetAllStatesByCountryAPI` described below.\n\nClass description:\nThis API cover for fetch all states.\n\nMethod signatures and docstrings:\n- def post(self, request): This API cover for fetch all states.\n- def fetch_states(self, request): Function to fetch states into database.\n\n<|skeleton|>\nclass GetAllStatesByCountryAPI:\n \"\"\"This API cover for fetch all states.\"\"\"\n\n def post(self, request):\n \"\"\"This API cover for fetch all states.\"\"\"\n <|body_0|>\n\n def fetch_states(self, request):\n \"\"\"Function to fetch states into database.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n input_json = request.data['APIParams']\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'Payload'], [request.data['AvailabilityDetails'], request.data['AuthenticationDetails'], None]))\n fetch_all_state = self.fetch_states(input_json)\n payload_details = {'states_details': fetch_all_state}\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Successfully retrieved States.', payload_details]))\n return Response(output_json)\n<|end_body_0|>\n\n<|body_start_1|>\n input_json, output_json = (request, {})\n try:\n sql = sql_exec('fetch_states_by_countryid', [input_json['country_id']])\n return sql\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message'], ['Failure', f'Internal Database Error, operation failed: {ex}']))\n return Response(output_json)\n return\n<|end_body_1|>\n", "revision_id": "36eb9931f330e64902354c6fc471be2adf4b7049", "skeleton": "<|skeleton|>\nclass GetAllStatesByCountryAPI:\n \"\"\"This API cover for fetch all states.\"\"\"\n\n def post(self, request):\n \"\"\"This API cover for fetch all states.\"\"\"\n <|body_0|>\n\n def fetch_states(self, request):\n \"\"\"Function to fetch states into database.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GetAllStatesByCountryAPI:\n \"\"\"This API cover for fetch all states.\"\"\"\n\n def post(self, request):\n \"\"\"This API cover for fetch all states.\"\"\"\n input_json = request.data['APIParams']\n output_json = dict(zip(['AvailabilityDetails', 'AuthenticationDetails', 'Payload'], [request.data['AvailabilityDetails'], request.data['AuthenticationDetails'], None]))\n fetch_all_state = self.fetch_states(input_json)\n payload_details = {'states_details': fetch_all_state}\n output_json['Payload'] = dict(zip(['Status', 'Message', 'Payload'], ['Success', 'Successfully retrieved States.', payload_details]))\n return Response(output_json)\n\n def fetch_states(self, request):\n \"\"\"Function to fetch states into database.\"\"\"\n input_json, output_json = (request, {})\n try:\n sql = sql_exec('fetch_states_by_countryid', [input_json['country_id']])\n return sql\n except Exception as ex:\n output_json = dict(zip(['Status', 'Message'], ['Failure', f'Internal Database Error, operation failed: {ex}']))\n return Response(output_json)\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "Generic/common/location/api/getallstatesdetailsbycountry/views_getallstatesdetailsbycountry.py", "source_repo": "archiemb303/common_backend_django", "split": "test", "star_events_count": 0} {"blob_id": "074bfc334573f6eaa4aa8ca886dff02dc2056228", "bodies": ["id = request.GET.get('id', '')\nobj = ChildMenu.objects.filter(classification=id)\nserializer = ChildMenuSer(obj, many=True)\npageindex = request.GET.get('page', 1)\npagesize = request.GET.get('limit', 10)\npageInator = Paginator(serializer.data, pagesize)\ncontacts = pageInator.page(pageindex)\nres = []\nfor contact in contacts:\n res.append(contact)\nreturn Response(data={'code': 0, 'msg': '', 'count': len(serializer.data), 'data': res})", "data = request.data\nprint(data)\ntry:\n serializer = ChildMenuSer(data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '添加子菜单成功'\n return Response(right_code)\n error_code['error'] = '添加子菜单保存数据库异常'\nexcept Exception as e:\n error_code['error'] = '添加子菜单失败'\nreturn Response(error_code, status=status.HTTP_400_BAD_REQUEST)", "data = request.data\ntry:\n obj = ChildMenu.objects.filter(id=pk).first()\n serializer = UpdatteChildMenuSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '编辑子菜单成功'\n return Response(right_code)\n else:\n error_code['error'] = '编辑子菜单保存失败'\nexcept Exception as e:\n print(e)\n error_code['error'] = '编辑子菜单失败'\nreturn Response(error_code, status=status.HTTP_400_BAD_REQUEST)", "try:\n obj = ChildMenu.objects.filter(id=pk)\n obj.delete()\n right_code['msg'] = '删除子菜单成功'\n return Response(right_code)\nexcept Exception as e:\n print(e)\n error_code['error'] = '删除子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)"], "bodies_text": "<|body_start_0|>\n id = request.GET.get('id', '')\n obj = ChildMenu.objects.filter(classification=id)\n serializer = ChildMenuSer(obj, many=True)\n pageindex = request.GET.get('page', 1)\n pagesize = request.GET.get('limit', 10)\n pageInator = Paginator(serializer.data, pagesize)\n contacts = pageInator.page(pageindex)\n res = []\n for contact in contacts:\n res.append(contact)\n return Response(data={'code': 0, 'msg': '', 'count': len(serializer.data), 'data': res})\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n print(data)\n try:\n serializer = ChildMenuSer(data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '添加子菜单成功'\n return Response(right_code)\n error_code['error'] = '添加子菜单保存数据库异常'\n except Exception as e:\n error_code['error'] = '添加子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.data\n try:\n obj = ChildMenu.objects.filter(id=pk).first()\n serializer = UpdatteChildMenuSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '编辑子菜单成功'\n return Response(right_code)\n else:\n error_code['error'] = '编辑子菜单保存失败'\n except Exception as e:\n print(e)\n error_code['error'] = '编辑子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n obj = ChildMenu.objects.filter(id=pk)\n obj.delete()\n right_code['msg'] = '删除子菜单成功'\n return Response(right_code)\n except Exception as e:\n print(e)\n error_code['error'] = '删除子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "MenuManageChildrenList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MenuManageChildrenList:\n\n def get(self, request, *args, **kwargs):\n \"\"\"子菜单列表\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"创建子菜单\"\"\"\n <|body_1|>\n\n def put(self, request, pk, *args, **kwargs):\n \"\"\"编辑子菜单\"\"\"\n <|body_2|>\n\n def delete(self, request, pk, *args, **kwargs):\n \"\"\"删除子菜单\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n id = request.GET.get('id', '')\n obj = ChildMenu.objects.filter(classification=id)\n serializer = ChildMenuSer(obj, many=True)\n pageindex = request.GET.get('page', 1)\n pagesize = request.GET.get('limit', 10)\n pageInator = Paginator(serializer.data, pagesize)\n contacts = pageInator.page(pageindex)\n res = []\n for contact in contacts:\n res.append(contact)\n return Response(data={'code': 0, 'msg': '', 'count': len(serializer.data), 'data': res})\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n print(data)\n try:\n serializer = ChildMenuSer(data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '添加子菜单成功'\n return Response(right_code)\n error_code['error'] = '添加子菜单保存数据库异常'\n except Exception as e:\n error_code['error'] = '添加子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.data\n try:\n obj = ChildMenu.objects.filter(id=pk).first()\n serializer = UpdatteChildMenuSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '编辑子菜单成功'\n return Response(right_code)\n else:\n error_code['error'] = '编辑子菜单保存失败'\n except Exception as e:\n print(e)\n error_code['error'] = '编辑子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n obj = ChildMenu.objects.filter(id=pk)\n obj.delete()\n right_code['msg'] = '删除子菜单成功'\n return Response(right_code)\n except Exception as e:\n print(e)\n error_code['error'] = '删除子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000066", "length_bytes": 7733, "license_type": "no_license", "methods": [{"docstring": "子菜单列表", "name": "get", "signature": "def get(self, request, *args, **kwargs)"}, {"docstring": "创建子菜单", "name": "post", "signature": "def post(self, request, *args, **kwargs)"}, {"docstring": "编辑子菜单", "name": "put", "signature": "def put(self, request, pk, *args, **kwargs)"}, {"docstring": "删除子菜单", "name": "delete", "signature": "def delete(self, request, pk, *args, **kwargs)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_043279", "prompt": "Implement the Python class `MenuManageChildrenList` described below.\n\nClass description:\nImplement the MenuManageChildrenList class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): 子菜单列表\n- def post(self, request, *args, **kwargs): 创建子菜单\n- def put(self, request, pk, *args, **kwargs): 编辑子菜单\n- def delete(self, request, pk, *args, **kwargs): 删除子菜单", "prompted_full_text": "Implement the Python class `MenuManageChildrenList` described below.\n\nClass description:\nImplement the MenuManageChildrenList class.\n\nMethod signatures and docstrings:\n- def get(self, request, *args, **kwargs): 子菜单列表\n- def post(self, request, *args, **kwargs): 创建子菜单\n- def put(self, request, pk, *args, **kwargs): 编辑子菜单\n- def delete(self, request, pk, *args, **kwargs): 删除子菜单\n\n<|skeleton|>\nclass MenuManageChildrenList:\n\n def get(self, request, *args, **kwargs):\n \"\"\"子菜单列表\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"创建子菜单\"\"\"\n <|body_1|>\n\n def put(self, request, pk, *args, **kwargs):\n \"\"\"编辑子菜单\"\"\"\n <|body_2|>\n\n def delete(self, request, pk, *args, **kwargs):\n \"\"\"删除子菜单\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n id = request.GET.get('id', '')\n obj = ChildMenu.objects.filter(classification=id)\n serializer = ChildMenuSer(obj, many=True)\n pageindex = request.GET.get('page', 1)\n pagesize = request.GET.get('limit', 10)\n pageInator = Paginator(serializer.data, pagesize)\n contacts = pageInator.page(pageindex)\n res = []\n for contact in contacts:\n res.append(contact)\n return Response(data={'code': 0, 'msg': '', 'count': len(serializer.data), 'data': res})\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n print(data)\n try:\n serializer = ChildMenuSer(data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '添加子菜单成功'\n return Response(right_code)\n error_code['error'] = '添加子菜单保存数据库异常'\n except Exception as e:\n error_code['error'] = '添加子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_1|>\n\n<|body_start_2|>\n data = request.data\n try:\n obj = ChildMenu.objects.filter(id=pk).first()\n serializer = UpdatteChildMenuSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '编辑子菜单成功'\n return Response(right_code)\n else:\n error_code['error'] = '编辑子菜单保存失败'\n except Exception as e:\n print(e)\n error_code['error'] = '编辑子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n obj = ChildMenu.objects.filter(id=pk)\n obj.delete()\n right_code['msg'] = '删除子菜单成功'\n return Response(right_code)\n except Exception as e:\n print(e)\n error_code['error'] = '删除子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_3|>\n", "revision_id": "e5247d56eb3af770dca1eeb18571281355e58c08", "skeleton": "<|skeleton|>\nclass MenuManageChildrenList:\n\n def get(self, request, *args, **kwargs):\n \"\"\"子菜单列表\"\"\"\n <|body_0|>\n\n def post(self, request, *args, **kwargs):\n \"\"\"创建子菜单\"\"\"\n <|body_1|>\n\n def put(self, request, pk, *args, **kwargs):\n \"\"\"编辑子菜单\"\"\"\n <|body_2|>\n\n def delete(self, request, pk, *args, **kwargs):\n \"\"\"删除子菜单\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MenuManageChildrenList:\n def get(self, request, *args, **kwargs):\n \"\"\"子菜单列表\"\"\"\n id = request.GET.get('id', '')\n obj = ChildMenu.objects.filter(classification=id)\n serializer = ChildMenuSer(obj, many=True)\n pageindex = request.GET.get('page', 1)\n pagesize = request.GET.get('limit', 10)\n pageInator = Paginator(serializer.data, pagesize)\n contacts = pageInator.page(pageindex)\n res = []\n for contact in contacts:\n res.append(contact)\n return Response(data={'code': 0, 'msg': '', 'count': len(serializer.data), 'data': res})\n\n def post(self, request, *args, **kwargs):\n \"\"\"创建子菜单\"\"\"\n data = request.data\n print(data)\n try:\n serializer = ChildMenuSer(data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '添加子菜单成功'\n return Response(right_code)\n error_code['error'] = '添加子菜单保存数据库异常'\n except Exception as e:\n error_code['error'] = '添加子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n\n def put(self, request, pk, *args, **kwargs):\n \"\"\"编辑子菜单\"\"\"\n data = request.data\n try:\n obj = ChildMenu.objects.filter(id=pk).first()\n serializer = UpdatteChildMenuSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n right_code['msg'] = '编辑子菜单成功'\n return Response(right_code)\n else:\n error_code['error'] = '编辑子菜单保存失败'\n except Exception as e:\n print(e)\n error_code['error'] = '编辑子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, *args, **kwargs):\n \"\"\"删除子菜单\"\"\"\n try:\n obj = ChildMenu.objects.filter(id=pk)\n obj.delete()\n right_code['msg'] = '删除子菜单成功'\n return Response(right_code)\n except Exception as e:\n print(e)\n error_code['error'] = '删除子菜单失败'\n return Response(error_code, status=status.HTTP_400_BAD_REQUEST)\n", "source": "the_stack_v2_python_sparse", "source_path": "easy/api/Menu/Menu.py", "source_repo": "zhuzhanhao1/Easytest", "split": "test", "star_events_count": 1} {"blob_id": "064dff0f666a66f4d664f7e3d05594ea055fb68a", "bodies": ["self.es = es\nself.es_index = es_index\nself.nlp = nlp\nself.source_fields = {'description': source_description_field}\nself.target_fields = {'title': target_title_field, 'description': target_description_field}\nif target_alias_field is not None:\n self.target_fields.update({'alias': target_alias_field})", "link_candidates = {}\nfor ent in spacy_doc.ents:\n ent_results = self._search_es_for_mention(ent.text, n_per_ent, reduce_to_key_fields=True)\n link_candidates[ent.text] = ent_results\nreturn link_candidates", "search_fields = [self.target_fields['title'], self.target_fields.get('alias', '')]\nquery = {'query': {'multi_match': {'query': mention, 'fuzziness': 'AUTO', 'fields': search_fields}}}\nsearch_results = self.es.search(index=self.es_index, body=query, size=n).get('hits', {}).get('hits', [])\nif reduce_to_key_fields:\n return [self._reduce_doc_to_key_fields(i) for i in search_results]\nreturn search_results", "nested_field = doc['_source']\nfields_split = field_dot_notation.split('.')\nif fields_split[0] != 'graph' and fields_split[-1] == '@value':\n fields_split = fields_split[0:-1]\nif field_dot_notation.startswith('data') and '.' in field_dot_notation[5:]:\n fields_split = ['data', field_dot_notation[5:]]\nfor idx, field in enumerate(fields_split):\n if idx + 1 < len(fields_split):\n nested_field = nested_field.get(field, {})\n else:\n nested_field = nested_field.get(field, '')\nreturn nested_field", "key_fields = set(['uri'] + list(self.source_fields.values()) + list(self.target_fields.values()))\nreduced_doc = {field: self._get_dict_field_from_dot_notation(doc, field) for field in key_fields}\nreturn {k: v for k, v in reduced_doc.items() if v != ''}"], "bodies_text": "<|body_start_0|>\n self.es = es\n self.es_index = es_index\n self.nlp = nlp\n self.source_fields = {'description': source_description_field}\n self.target_fields = {'title': target_title_field, 'description': target_description_field}\n if target_alias_field is not None:\n self.target_fields.update({'alias': target_alias_field})\n<|end_body_0|>\n\n<|body_start_1|>\n link_candidates = {}\n for ent in spacy_doc.ents:\n ent_results = self._search_es_for_mention(ent.text, n_per_ent, reduce_to_key_fields=True)\n link_candidates[ent.text] = ent_results\n return link_candidates\n<|end_body_1|>\n\n<|body_start_2|>\n search_fields = [self.target_fields['title'], self.target_fields.get('alias', '')]\n query = {'query': {'multi_match': {'query': mention, 'fuzziness': 'AUTO', 'fields': search_fields}}}\n search_results = self.es.search(index=self.es_index, body=query, size=n).get('hits', {}).get('hits', [])\n if reduce_to_key_fields:\n return [self._reduce_doc_to_key_fields(i) for i in search_results]\n return search_results\n<|end_body_2|>\n\n<|body_start_3|>\n nested_field = doc['_source']\n fields_split = field_dot_notation.split('.')\n if fields_split[0] != 'graph' and fields_split[-1] == '@value':\n fields_split = fields_split[0:-1]\n if field_dot_notation.startswith('data') and '.' in field_dot_notation[5:]:\n fields_split = ['data', field_dot_notation[5:]]\n for idx, field in enumerate(fields_split):\n if idx + 1 < len(fields_split):\n nested_field = nested_field.get(field, {})\n else:\n nested_field = nested_field.get(field, '')\n return nested_field\n<|end_body_3|>\n\n<|body_start_4|>\n key_fields = set(['uri'] + list(self.source_fields.values()) + list(self.target_fields.values()))\n reduced_doc = {field: self._get_dict_field_from_dot_notation(doc, field) for field in key_fields}\n return {k: v for k, v in reduced_doc.items() if v != ''}\n<|end_body_4|>\n", "class_docstring": "", "class_name": "ESEntityLinker", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ESEntityLinker:\n\n def __init__(self, es, es_index: str, nlp, source_description_field: str, target_title_field: str, target_description_field: str, target_alias_field: str=None):\n \"\"\"Each of the input variables is dot notation for accessing the field from Elasticsearch `_source`, e.g. 'graph.@rdfs:label.@value'.\"\"\"\n <|body_0|>\n\n def get_link_candidates_from_spacy_doc(self, spacy_doc, n_per_ent: int) -> List[dict]:\n \"\"\"Return details of candidates for each entity mention in a spaCy doc.\"\"\"\n <|body_1|>\n\n def _search_es_for_mention(self, mention: str, n: int, reduce_to_key_fields: bool=True) -> List[dict]:\n \"\"\"Given an entity mention, search the target Elasticsearch fields and return up to `n` documents.\"\"\"\n <|body_2|>\n\n def _get_dict_field_from_dot_notation(self, doc: dict, field_dot_notation: str) -> dict:\n \"\"\"Get a field from a dictonary from Elasticsearch dot notation.\"\"\"\n <|body_3|>\n\n def _reduce_doc_to_key_fields(self, doc: dict) -> dict:\n \"\"\"Reduce doc to uri, source_description_field, target_title_field, target_description_field, target_alias_field\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.es = es\n self.es_index = es_index\n self.nlp = nlp\n self.source_fields = {'description': source_description_field}\n self.target_fields = {'title': target_title_field, 'description': target_description_field}\n if target_alias_field is not None:\n self.target_fields.update({'alias': target_alias_field})\n<|end_body_0|>\n\n<|body_start_1|>\n link_candidates = {}\n for ent in spacy_doc.ents:\n ent_results = self._search_es_for_mention(ent.text, n_per_ent, reduce_to_key_fields=True)\n link_candidates[ent.text] = ent_results\n return link_candidates\n<|end_body_1|>\n\n<|body_start_2|>\n search_fields = [self.target_fields['title'], self.target_fields.get('alias', '')]\n query = {'query': {'multi_match': {'query': mention, 'fuzziness': 'AUTO', 'fields': search_fields}}}\n search_results = self.es.search(index=self.es_index, body=query, size=n).get('hits', {}).get('hits', [])\n if reduce_to_key_fields:\n return [self._reduce_doc_to_key_fields(i) for i in search_results]\n return search_results\n<|end_body_2|>\n\n<|body_start_3|>\n nested_field = doc['_source']\n fields_split = field_dot_notation.split('.')\n if fields_split[0] != 'graph' and fields_split[-1] == '@value':\n fields_split = fields_split[0:-1]\n if field_dot_notation.startswith('data') and '.' in field_dot_notation[5:]:\n fields_split = ['data', field_dot_notation[5:]]\n for idx, field in enumerate(fields_split):\n if idx + 1 < len(fields_split):\n nested_field = nested_field.get(field, {})\n else:\n nested_field = nested_field.get(field, '')\n return nested_field\n<|end_body_3|>\n\n<|body_start_4|>\n key_fields = set(['uri'] + list(self.source_fields.values()) + list(self.target_fields.values()))\n reduced_doc = {field: self._get_dict_field_from_dot_notation(doc, field) for field in key_fields}\n return {k: v for k, v in reduced_doc.items() if v != ''}\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000067", "length_bytes": 3987, "license_type": "permissive", "methods": [{"docstring": "Each of the input variables is dot notation for accessing the field from Elasticsearch `_source`, e.g. 'graph.@rdfs:label.@value'.", "name": "__init__", "signature": "def __init__(self, es, es_index: str, nlp, source_description_field: str, target_title_field: str, target_description_field: str, target_alias_field: str=None)"}, {"docstring": "Return details of candidates for each entity mention in a spaCy doc.", "name": "get_link_candidates_from_spacy_doc", "signature": "def get_link_candidates_from_spacy_doc(self, spacy_doc, n_per_ent: int) -> List[dict]"}, {"docstring": "Given an entity mention, search the target Elasticsearch fields and return up to `n` documents.", "name": "_search_es_for_mention", "signature": "def _search_es_for_mention(self, mention: str, n: int, reduce_to_key_fields: bool=True) -> List[dict]"}, {"docstring": "Get a field from a dictonary from Elasticsearch dot notation.", "name": "_get_dict_field_from_dot_notation", "signature": "def _get_dict_field_from_dot_notation(self, doc: dict, field_dot_notation: str) -> dict"}, {"docstring": "Reduce doc to uri, source_description_field, target_title_field, target_description_field, target_alias_field", "name": "_reduce_doc_to_key_fields", "signature": "def _reduce_doc_to_key_fields(self, doc: dict) -> dict"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_009171", "prompt": "Implement the Python class `ESEntityLinker` described below.\n\nClass description:\nImplement the ESEntityLinker class.\n\nMethod signatures and docstrings:\n- def __init__(self, es, es_index: str, nlp, source_description_field: str, target_title_field: str, target_description_field: str, target_alias_field: str=None): Each of the input variables is dot notation for accessing the field from Elasticsearch `_source`, e.g. 'graph.@rdfs:label.@value'.\n- def get_link_candidates_from_spacy_doc(self, spacy_doc, n_per_ent: int) -> List[dict]: Return details of candidates for each entity mention in a spaCy doc.\n- def _search_es_for_mention(self, mention: str, n: int, reduce_to_key_fields: bool=True) -> List[dict]: Given an entity mention, search the target Elasticsearch fields and return up to `n` documents.\n- def _get_dict_field_from_dot_notation(self, doc: dict, field_dot_notation: str) -> dict: Get a field from a dictonary from Elasticsearch dot notation.\n- def _reduce_doc_to_key_fields(self, doc: dict) -> dict: Reduce doc to uri, source_description_field, target_title_field, target_description_field, target_alias_field", "prompted_full_text": "Implement the Python class `ESEntityLinker` described below.\n\nClass description:\nImplement the ESEntityLinker class.\n\nMethod signatures and docstrings:\n- def __init__(self, es, es_index: str, nlp, source_description_field: str, target_title_field: str, target_description_field: str, target_alias_field: str=None): Each of the input variables is dot notation for accessing the field from Elasticsearch `_source`, e.g. 'graph.@rdfs:label.@value'.\n- def get_link_candidates_from_spacy_doc(self, spacy_doc, n_per_ent: int) -> List[dict]: Return details of candidates for each entity mention in a spaCy doc.\n- def _search_es_for_mention(self, mention: str, n: int, reduce_to_key_fields: bool=True) -> List[dict]: Given an entity mention, search the target Elasticsearch fields and return up to `n` documents.\n- def _get_dict_field_from_dot_notation(self, doc: dict, field_dot_notation: str) -> dict: Get a field from a dictonary from Elasticsearch dot notation.\n- def _reduce_doc_to_key_fields(self, doc: dict) -> dict: Reduce doc to uri, source_description_field, target_title_field, target_description_field, target_alias_field\n\n<|skeleton|>\nclass ESEntityLinker:\n\n def __init__(self, es, es_index: str, nlp, source_description_field: str, target_title_field: str, target_description_field: str, target_alias_field: str=None):\n \"\"\"Each of the input variables is dot notation for accessing the field from Elasticsearch `_source`, e.g. 'graph.@rdfs:label.@value'.\"\"\"\n <|body_0|>\n\n def get_link_candidates_from_spacy_doc(self, spacy_doc, n_per_ent: int) -> List[dict]:\n \"\"\"Return details of candidates for each entity mention in a spaCy doc.\"\"\"\n <|body_1|>\n\n def _search_es_for_mention(self, mention: str, n: int, reduce_to_key_fields: bool=True) -> List[dict]:\n \"\"\"Given an entity mention, search the target Elasticsearch fields and return up to `n` documents.\"\"\"\n <|body_2|>\n\n def _get_dict_field_from_dot_notation(self, doc: dict, field_dot_notation: str) -> dict:\n \"\"\"Get a field from a dictonary from Elasticsearch dot notation.\"\"\"\n <|body_3|>\n\n def _reduce_doc_to_key_fields(self, doc: dict) -> dict:\n \"\"\"Reduce doc to uri, source_description_field, target_title_field, target_description_field, target_alias_field\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.es = es\n self.es_index = es_index\n self.nlp = nlp\n self.source_fields = {'description': source_description_field}\n self.target_fields = {'title': target_title_field, 'description': target_description_field}\n if target_alias_field is not None:\n self.target_fields.update({'alias': target_alias_field})\n<|end_body_0|>\n\n<|body_start_1|>\n link_candidates = {}\n for ent in spacy_doc.ents:\n ent_results = self._search_es_for_mention(ent.text, n_per_ent, reduce_to_key_fields=True)\n link_candidates[ent.text] = ent_results\n return link_candidates\n<|end_body_1|>\n\n<|body_start_2|>\n search_fields = [self.target_fields['title'], self.target_fields.get('alias', '')]\n query = {'query': {'multi_match': {'query': mention, 'fuzziness': 'AUTO', 'fields': search_fields}}}\n search_results = self.es.search(index=self.es_index, body=query, size=n).get('hits', {}).get('hits', [])\n if reduce_to_key_fields:\n return [self._reduce_doc_to_key_fields(i) for i in search_results]\n return search_results\n<|end_body_2|>\n\n<|body_start_3|>\n nested_field = doc['_source']\n fields_split = field_dot_notation.split('.')\n if fields_split[0] != 'graph' and fields_split[-1] == '@value':\n fields_split = fields_split[0:-1]\n if field_dot_notation.startswith('data') and '.' in field_dot_notation[5:]:\n fields_split = ['data', field_dot_notation[5:]]\n for idx, field in enumerate(fields_split):\n if idx + 1 < len(fields_split):\n nested_field = nested_field.get(field, {})\n else:\n nested_field = nested_field.get(field, '')\n return nested_field\n<|end_body_3|>\n\n<|body_start_4|>\n key_fields = set(['uri'] + list(self.source_fields.values()) + list(self.target_fields.values()))\n reduced_doc = {field: self._get_dict_field_from_dot_notation(doc, field) for field in key_fields}\n return {k: v for k, v in reduced_doc.items() if v != ''}\n<|end_body_4|>\n", "revision_id": "77c994d7ba7253bd81140a2202bf8b03b6082e43", "skeleton": "<|skeleton|>\nclass ESEntityLinker:\n\n def __init__(self, es, es_index: str, nlp, source_description_field: str, target_title_field: str, target_description_field: str, target_alias_field: str=None):\n \"\"\"Each of the input variables is dot notation for accessing the field from Elasticsearch `_source`, e.g. 'graph.@rdfs:label.@value'.\"\"\"\n <|body_0|>\n\n def get_link_candidates_from_spacy_doc(self, spacy_doc, n_per_ent: int) -> List[dict]:\n \"\"\"Return details of candidates for each entity mention in a spaCy doc.\"\"\"\n <|body_1|>\n\n def _search_es_for_mention(self, mention: str, n: int, reduce_to_key_fields: bool=True) -> List[dict]:\n \"\"\"Given an entity mention, search the target Elasticsearch fields and return up to `n` documents.\"\"\"\n <|body_2|>\n\n def _get_dict_field_from_dot_notation(self, doc: dict, field_dot_notation: str) -> dict:\n \"\"\"Get a field from a dictonary from Elasticsearch dot notation.\"\"\"\n <|body_3|>\n\n def _reduce_doc_to_key_fields(self, doc: dict) -> dict:\n \"\"\"Reduce doc to uri, source_description_field, target_title_field, target_description_field, target_alias_field\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ESEntityLinker:\n def __init__(self, es, es_index: str, nlp, source_description_field: str, target_title_field: str, target_description_field: str, target_alias_field: str=None):\n \"\"\"Each of the input variables is dot notation for accessing the field from Elasticsearch `_source`, e.g. 'graph.@rdfs:label.@value'.\"\"\"\n self.es = es\n self.es_index = es_index\n self.nlp = nlp\n self.source_fields = {'description': source_description_field}\n self.target_fields = {'title': target_title_field, 'description': target_description_field}\n if target_alias_field is not None:\n self.target_fields.update({'alias': target_alias_field})\n\n def get_link_candidates_from_spacy_doc(self, spacy_doc, n_per_ent: int) -> List[dict]:\n \"\"\"Return details of candidates for each entity mention in a spaCy doc.\"\"\"\n link_candidates = {}\n for ent in spacy_doc.ents:\n ent_results = self._search_es_for_mention(ent.text, n_per_ent, reduce_to_key_fields=True)\n link_candidates[ent.text] = ent_results\n return link_candidates\n\n def _search_es_for_mention(self, mention: str, n: int, reduce_to_key_fields: bool=True) -> List[dict]:\n \"\"\"Given an entity mention, search the target Elasticsearch fields and return up to `n` documents.\"\"\"\n search_fields = [self.target_fields['title'], self.target_fields.get('alias', '')]\n query = {'query': {'multi_match': {'query': mention, 'fuzziness': 'AUTO', 'fields': search_fields}}}\n search_results = self.es.search(index=self.es_index, body=query, size=n).get('hits', {}).get('hits', [])\n if reduce_to_key_fields:\n return [self._reduce_doc_to_key_fields(i) for i in search_results]\n return search_results\n\n def _get_dict_field_from_dot_notation(self, doc: dict, field_dot_notation: str) -> dict:\n \"\"\"Get a field from a dictonary from Elasticsearch dot notation.\"\"\"\n nested_field = doc['_source']\n fields_split = field_dot_notation.split('.')\n if fields_split[0] != 'graph' and fields_split[-1] == '@value':\n fields_split = fields_split[0:-1]\n if field_dot_notation.startswith('data') and '.' in field_dot_notation[5:]:\n fields_split = ['data', field_dot_notation[5:]]\n for idx, field in enumerate(fields_split):\n if idx + 1 < len(fields_split):\n nested_field = nested_field.get(field, {})\n else:\n nested_field = nested_field.get(field, '')\n return nested_field\n\n def _reduce_doc_to_key_fields(self, doc: dict) -> dict:\n \"\"\"Reduce doc to uri, source_description_field, target_title_field, target_description_field, target_alias_field\"\"\"\n key_fields = set(['uri'] + list(self.source_fields.values()) + list(self.target_fields.values()))\n reduced_doc = {field: self._get_dict_field_from_dot_notation(doc, field) for field in key_fields}\n return {k: v for k, v in reduced_doc.items() if v != ''}\n", "source": "the_stack_v2_python_sparse", "source_path": "experiments/NEL/NEL_elasticsearch/entity_linker.py", "source_repo": "TheScienceMuseum/heritage-connector", "split": "test", "star_events_count": 16} {"blob_id": "4dda070ecbc5b975b43d774007f2cf81780bce9e", "bodies": ["self.n_inputs = n_inputs\nself.n_hiddens = n_hiddens\nself.act_fun = act_fun\nself.n_layers = n_layers\nself.n_comps = n_comps\nself.batch_norm = batch_norm\nself.mode = mode\nself.input = tt.matrix('x', dtype=dtype) if input is None else input\nself.parms = []\nself.maf = MaskedAutoregressiveFlow(n_inputs, n_hiddens, act_fun, n_layers, batch_norm, input_order, mode, self.input)\nself.bns = self.maf.bns\nself.parms += self.maf.parms\nself.input_order = self.maf.input_order\ninput_order = input_order if input_order == 'random' else self.maf.mades[-1].input_order[::-1]\nself.made = mades.MixtureOfGaussiansMade(n_inputs, n_hiddens, act_fun, n_comps, input_order, mode, self.maf.u)\nself.parms += self.made.parms\nself.L = self.made.L + self.maf.logdet_dudx\nself.L.name = 'L'\nself.trn_loss = -tt.mean(self.L)\nself.trn_loss.name = 'trn_loss'\nself.eval_lprob_f = None\nself.eval_us_f = None", "if self.eval_lprob_f is None:\n self.eval_lprob_f = theano.function(inputs=[self.input], outputs=self.L, givens=[(bn.m, bn.bm) for bn in self.bns] + [(bn.v, bn.bv) for bn in self.bns])\nlprob = self.eval_lprob_f(x.astype(dtype))\nreturn lprob if log else np.exp(lprob)", "x = rng.randn(n_samples, self.n_inputs).astype(dtype) if u is None else u\nx = self.made.gen(n_samples, x)\nx = self.maf.gen(n_samples, x)\nreturn x", "if self.eval_us_f is None:\n self.eval_us_f = theano.function(inputs=[self.input], outputs=self.made.u)\nreturn self.eval_us_f(x.astype(dtype))"], "bodies_text": "<|body_start_0|>\n self.n_inputs = n_inputs\n self.n_hiddens = n_hiddens\n self.act_fun = act_fun\n self.n_layers = n_layers\n self.n_comps = n_comps\n self.batch_norm = batch_norm\n self.mode = mode\n self.input = tt.matrix('x', dtype=dtype) if input is None else input\n self.parms = []\n self.maf = MaskedAutoregressiveFlow(n_inputs, n_hiddens, act_fun, n_layers, batch_norm, input_order, mode, self.input)\n self.bns = self.maf.bns\n self.parms += self.maf.parms\n self.input_order = self.maf.input_order\n input_order = input_order if input_order == 'random' else self.maf.mades[-1].input_order[::-1]\n self.made = mades.MixtureOfGaussiansMade(n_inputs, n_hiddens, act_fun, n_comps, input_order, mode, self.maf.u)\n self.parms += self.made.parms\n self.L = self.made.L + self.maf.logdet_dudx\n self.L.name = 'L'\n self.trn_loss = -tt.mean(self.L)\n self.trn_loss.name = 'trn_loss'\n self.eval_lprob_f = None\n self.eval_us_f = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.eval_lprob_f is None:\n self.eval_lprob_f = theano.function(inputs=[self.input], outputs=self.L, givens=[(bn.m, bn.bm) for bn in self.bns] + [(bn.v, bn.bv) for bn in self.bns])\n lprob = self.eval_lprob_f(x.astype(dtype))\n return lprob if log else np.exp(lprob)\n<|end_body_1|>\n\n<|body_start_2|>\n x = rng.randn(n_samples, self.n_inputs).astype(dtype) if u is None else u\n x = self.made.gen(n_samples, x)\n x = self.maf.gen(n_samples, x)\n return x\n<|end_body_2|>\n\n<|body_start_3|>\n if self.eval_us_f is None:\n self.eval_us_f = theano.function(inputs=[self.input], outputs=self.made.u)\n return self.eval_us_f(x.astype(dtype))\n<|end_body_3|>\n", "class_docstring": "A Masked Autoregressive Flow, where the target distribution is given by a MoG MADE.", "class_name": "MaskedAutoregressiveFlow_on_MADE", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MaskedAutoregressiveFlow_on_MADE:\n \"\"\"A Masked Autoregressive Flow, where the target distribution is given by a MoG MADE.\"\"\"\n\n def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None):\n \"\"\"Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created\"\"\"\n <|body_0|>\n\n def eval(self, x, log=True):\n \"\"\"Evaluate log probabilities for given inputs. :param x: data matrix where rows are inputs :param log: whether to return probabilities in the log domain :return: list of log probabilities log p(x)\"\"\"\n <|body_1|>\n\n def gen(self, n_samples=1, u=None):\n \"\"\"Generate samples, by propagating random numbers through each made. :param n_samples: number of samples :param u: random numbers to use in generating samples; if None, new random numbers are drawn :return: samples\"\"\"\n <|body_2|>\n\n def calc_random_numbers(self, x):\n \"\"\"Givan a dataset, calculate the random numbers used internally to generate the dataset. :param x: numpy array, rows are datapoints :return: numpy array, rows are corresponding random numbers\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.n_inputs = n_inputs\n self.n_hiddens = n_hiddens\n self.act_fun = act_fun\n self.n_layers = n_layers\n self.n_comps = n_comps\n self.batch_norm = batch_norm\n self.mode = mode\n self.input = tt.matrix('x', dtype=dtype) if input is None else input\n self.parms = []\n self.maf = MaskedAutoregressiveFlow(n_inputs, n_hiddens, act_fun, n_layers, batch_norm, input_order, mode, self.input)\n self.bns = self.maf.bns\n self.parms += self.maf.parms\n self.input_order = self.maf.input_order\n input_order = input_order if input_order == 'random' else self.maf.mades[-1].input_order[::-1]\n self.made = mades.MixtureOfGaussiansMade(n_inputs, n_hiddens, act_fun, n_comps, input_order, mode, self.maf.u)\n self.parms += self.made.parms\n self.L = self.made.L + self.maf.logdet_dudx\n self.L.name = 'L'\n self.trn_loss = -tt.mean(self.L)\n self.trn_loss.name = 'trn_loss'\n self.eval_lprob_f = None\n self.eval_us_f = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.eval_lprob_f is None:\n self.eval_lprob_f = theano.function(inputs=[self.input], outputs=self.L, givens=[(bn.m, bn.bm) for bn in self.bns] + [(bn.v, bn.bv) for bn in self.bns])\n lprob = self.eval_lprob_f(x.astype(dtype))\n return lprob if log else np.exp(lprob)\n<|end_body_1|>\n\n<|body_start_2|>\n x = rng.randn(n_samples, self.n_inputs).astype(dtype) if u is None else u\n x = self.made.gen(n_samples, x)\n x = self.maf.gen(n_samples, x)\n return x\n<|end_body_2|>\n\n<|body_start_3|>\n if self.eval_us_f is None:\n self.eval_us_f = theano.function(inputs=[self.input], outputs=self.made.u)\n return self.eval_us_f(x.astype(dtype))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000068", "length_bytes": 18625, "license_type": "permissive", "methods": [{"docstring": "Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created", "name": "__init__", "signature": "def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None)"}, {"docstring": "Evaluate log probabilities for given inputs. :param x: data matrix where rows are inputs :param log: whether to return probabilities in the log domain :return: list of log probabilities log p(x)", "name": "eval", "signature": "def eval(self, x, log=True)"}, {"docstring": "Generate samples, by propagating random numbers through each made. :param n_samples: number of samples :param u: random numbers to use in generating samples; if None, new random numbers are drawn :return: samples", "name": "gen", "signature": "def gen(self, n_samples=1, u=None)"}, {"docstring": "Givan a dataset, calculate the random numbers used internally to generate the dataset. :param x: numpy array, rows are datapoints :return: numpy array, rows are corresponding random numbers", "name": "calc_random_numbers", "signature": "def calc_random_numbers(self, x)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_001767", "prompt": "Implement the Python class `MaskedAutoregressiveFlow_on_MADE` described below.\n\nClass description:\nA Masked Autoregressive Flow, where the target distribution is given by a MoG MADE.\n\nMethod signatures and docstrings:\n- def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None): Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created\n- def eval(self, x, log=True): Evaluate log probabilities for given inputs. :param x: data matrix where rows are inputs :param log: whether to return probabilities in the log domain :return: list of log probabilities log p(x)\n- def gen(self, n_samples=1, u=None): Generate samples, by propagating random numbers through each made. :param n_samples: number of samples :param u: random numbers to use in generating samples; if None, new random numbers are drawn :return: samples\n- def calc_random_numbers(self, x): Givan a dataset, calculate the random numbers used internally to generate the dataset. :param x: numpy array, rows are datapoints :return: numpy array, rows are corresponding random numbers", "prompted_full_text": "Implement the Python class `MaskedAutoregressiveFlow_on_MADE` described below.\n\nClass description:\nA Masked Autoregressive Flow, where the target distribution is given by a MoG MADE.\n\nMethod signatures and docstrings:\n- def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None): Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created\n- def eval(self, x, log=True): Evaluate log probabilities for given inputs. :param x: data matrix where rows are inputs :param log: whether to return probabilities in the log domain :return: list of log probabilities log p(x)\n- def gen(self, n_samples=1, u=None): Generate samples, by propagating random numbers through each made. :param n_samples: number of samples :param u: random numbers to use in generating samples; if None, new random numbers are drawn :return: samples\n- def calc_random_numbers(self, x): Givan a dataset, calculate the random numbers used internally to generate the dataset. :param x: numpy array, rows are datapoints :return: numpy array, rows are corresponding random numbers\n\n<|skeleton|>\nclass MaskedAutoregressiveFlow_on_MADE:\n \"\"\"A Masked Autoregressive Flow, where the target distribution is given by a MoG MADE.\"\"\"\n\n def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None):\n \"\"\"Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created\"\"\"\n <|body_0|>\n\n def eval(self, x, log=True):\n \"\"\"Evaluate log probabilities for given inputs. :param x: data matrix where rows are inputs :param log: whether to return probabilities in the log domain :return: list of log probabilities log p(x)\"\"\"\n <|body_1|>\n\n def gen(self, n_samples=1, u=None):\n \"\"\"Generate samples, by propagating random numbers through each made. :param n_samples: number of samples :param u: random numbers to use in generating samples; if None, new random numbers are drawn :return: samples\"\"\"\n <|body_2|>\n\n def calc_random_numbers(self, x):\n \"\"\"Givan a dataset, calculate the random numbers used internally to generate the dataset. :param x: numpy array, rows are datapoints :return: numpy array, rows are corresponding random numbers\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.n_inputs = n_inputs\n self.n_hiddens = n_hiddens\n self.act_fun = act_fun\n self.n_layers = n_layers\n self.n_comps = n_comps\n self.batch_norm = batch_norm\n self.mode = mode\n self.input = tt.matrix('x', dtype=dtype) if input is None else input\n self.parms = []\n self.maf = MaskedAutoregressiveFlow(n_inputs, n_hiddens, act_fun, n_layers, batch_norm, input_order, mode, self.input)\n self.bns = self.maf.bns\n self.parms += self.maf.parms\n self.input_order = self.maf.input_order\n input_order = input_order if input_order == 'random' else self.maf.mades[-1].input_order[::-1]\n self.made = mades.MixtureOfGaussiansMade(n_inputs, n_hiddens, act_fun, n_comps, input_order, mode, self.maf.u)\n self.parms += self.made.parms\n self.L = self.made.L + self.maf.logdet_dudx\n self.L.name = 'L'\n self.trn_loss = -tt.mean(self.L)\n self.trn_loss.name = 'trn_loss'\n self.eval_lprob_f = None\n self.eval_us_f = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.eval_lprob_f is None:\n self.eval_lprob_f = theano.function(inputs=[self.input], outputs=self.L, givens=[(bn.m, bn.bm) for bn in self.bns] + [(bn.v, bn.bv) for bn in self.bns])\n lprob = self.eval_lprob_f(x.astype(dtype))\n return lprob if log else np.exp(lprob)\n<|end_body_1|>\n\n<|body_start_2|>\n x = rng.randn(n_samples, self.n_inputs).astype(dtype) if u is None else u\n x = self.made.gen(n_samples, x)\n x = self.maf.gen(n_samples, x)\n return x\n<|end_body_2|>\n\n<|body_start_3|>\n if self.eval_us_f is None:\n self.eval_us_f = theano.function(inputs=[self.input], outputs=self.made.u)\n return self.eval_us_f(x.astype(dtype))\n<|end_body_3|>\n", "revision_id": "d5fa619db637d19f0c3018aeb1431f657dd533bf", "skeleton": "<|skeleton|>\nclass MaskedAutoregressiveFlow_on_MADE:\n \"\"\"A Masked Autoregressive Flow, where the target distribution is given by a MoG MADE.\"\"\"\n\n def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None):\n \"\"\"Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created\"\"\"\n <|body_0|>\n\n def eval(self, x, log=True):\n \"\"\"Evaluate log probabilities for given inputs. :param x: data matrix where rows are inputs :param log: whether to return probabilities in the log domain :return: list of log probabilities log p(x)\"\"\"\n <|body_1|>\n\n def gen(self, n_samples=1, u=None):\n \"\"\"Generate samples, by propagating random numbers through each made. :param n_samples: number of samples :param u: random numbers to use in generating samples; if None, new random numbers are drawn :return: samples\"\"\"\n <|body_2|>\n\n def calc_random_numbers(self, x):\n \"\"\"Givan a dataset, calculate the random numbers used internally to generate the dataset. :param x: numpy array, rows are datapoints :return: numpy array, rows are corresponding random numbers\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MaskedAutoregressiveFlow_on_MADE:\n \"\"\"A Masked Autoregressive Flow, where the target distribution is given by a MoG MADE.\"\"\"\n\n def __init__(self, n_inputs, n_hiddens, act_fun, n_layers, n_comps, batch_norm=True, input_order='sequential', mode='sequential', input=None):\n \"\"\"Constructor. :param n_inputs: number of inputs :param n_hiddens: list with number of hidden units for each hidden layer :param act_fun: name of activation function :param n_layers: number of layers in the flow :param n_comps: number of gaussians per conditional for the target made :param batch_norm: whether to use batch normalization between layers :param input_order: order of inputs of last made :param mode: strategy for assigning degrees to hidden nodes: can be 'random' or 'sequential' :param input: theano variable to serve as input; if None, a new variable is created\"\"\"\n self.n_inputs = n_inputs\n self.n_hiddens = n_hiddens\n self.act_fun = act_fun\n self.n_layers = n_layers\n self.n_comps = n_comps\n self.batch_norm = batch_norm\n self.mode = mode\n self.input = tt.matrix('x', dtype=dtype) if input is None else input\n self.parms = []\n self.maf = MaskedAutoregressiveFlow(n_inputs, n_hiddens, act_fun, n_layers, batch_norm, input_order, mode, self.input)\n self.bns = self.maf.bns\n self.parms += self.maf.parms\n self.input_order = self.maf.input_order\n input_order = input_order if input_order == 'random' else self.maf.mades[-1].input_order[::-1]\n self.made = mades.MixtureOfGaussiansMade(n_inputs, n_hiddens, act_fun, n_comps, input_order, mode, self.maf.u)\n self.parms += self.made.parms\n self.L = self.made.L + self.maf.logdet_dudx\n self.L.name = 'L'\n self.trn_loss = -tt.mean(self.L)\n self.trn_loss.name = 'trn_loss'\n self.eval_lprob_f = None\n self.eval_us_f = None\n\n def eval(self, x, log=True):\n \"\"\"Evaluate log probabilities for given inputs. :param x: data matrix where rows are inputs :param log: whether to return probabilities in the log domain :return: list of log probabilities log p(x)\"\"\"\n if self.eval_lprob_f is None:\n self.eval_lprob_f = theano.function(inputs=[self.input], outputs=self.L, givens=[(bn.m, bn.bm) for bn in self.bns] + [(bn.v, bn.bv) for bn in self.bns])\n lprob = self.eval_lprob_f(x.astype(dtype))\n return lprob if log else np.exp(lprob)\n\n def gen(self, n_samples=1, u=None):\n \"\"\"Generate samples, by propagating random numbers through each made. :param n_samples: number of samples :param u: random numbers to use in generating samples; if None, new random numbers are drawn :return: samples\"\"\"\n x = rng.randn(n_samples, self.n_inputs).astype(dtype) if u is None else u\n x = self.made.gen(n_samples, x)\n x = self.maf.gen(n_samples, x)\n return x\n\n def calc_random_numbers(self, x):\n \"\"\"Givan a dataset, calculate the random numbers used internally to generate the dataset. :param x: numpy array, rows are datapoints :return: numpy array, rows are corresponding random numbers\"\"\"\n if self.eval_us_f is None:\n self.eval_us_f = theano.function(inputs=[self.input], outputs=self.made.u)\n return self.eval_us_f(x.astype(dtype))\n", "source": "the_stack_v2_python_sparse", "source_path": "ml/models/mafs.py", "source_repo": "gpapamak/maf", "split": "test", "star_events_count": 199} {"blob_id": "dac9423825045589d158f0a102fa2700902e45b4", "bodies": ["obs = np.array(obs)\nact = np.array(act)\nself.obs_buf = np.zeros(add_batch_dim(batch_size, obs.shape))\nself.act_buf = np.zeros(add_batch_dim(batch_size, act.shape))\nself.rew_buf = np.zeros((batch_size,))\nself.ret_buf = np.zeros((batch_size,))\nself.val_buf = np.zeros((batch_size,))\nself.adv_buf = np.zeros((batch_size,))\nself.gamma, self.lam, self.eps = (gamma, lam, eps)\nself.ptr, self.episode_start, self.batch_size = (0, 0, batch_size)", "assert self.ptr < self.batch_size, 'VPGBuffer is full'\nself.obs_buf[self.ptr] = obs\nself.act_buf[self.ptr] = act\nself.rew_buf[self.ptr] = rew\nself.val_buf[self.ptr] = val\nself.ptr += 1\nreturn self.ptr == self.batch_size", "episode = slice(self.episode_start, self.ptr)\nrews = np.append(self.rew_buf[episode], last_val)\nvals = np.append(self.val_buf[episode], last_val)\ndeltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\nadv = reverse_discount_cumsum(deltas, self.gamma * self.lam)\nself.adv_buf[episode] = adv\nret = reverse_discount_cumsum(rews, self.gamma)[:-1]\nself.ret_buf[episode] = ret\nself.episode_start = self.ptr", "assert self.ptr == self.batch_size, 'VPGBuffer batch not complete'\nself.ptr, self.episode_start = (0, 0)\nadv_mean, adv_std = (np.mean(self.adv_buf), np.std(self.adv_buf))\nself.adv_buf = (self.adv_buf - adv_mean) / (adv_std + self.eps)\nreturn {'observations': jax.device_put(self.obs_buf), 'actions': jax.device_put(self.act_buf), 'returns': jax.device_put(self.ret_buf), 'advantages': jax.device_put(self.ret_buf)}"], "bodies_text": "<|body_start_0|>\n obs = np.array(obs)\n act = np.array(act)\n self.obs_buf = np.zeros(add_batch_dim(batch_size, obs.shape))\n self.act_buf = np.zeros(add_batch_dim(batch_size, act.shape))\n self.rew_buf = np.zeros((batch_size,))\n self.ret_buf = np.zeros((batch_size,))\n self.val_buf = np.zeros((batch_size,))\n self.adv_buf = np.zeros((batch_size,))\n self.gamma, self.lam, self.eps = (gamma, lam, eps)\n self.ptr, self.episode_start, self.batch_size = (0, 0, batch_size)\n<|end_body_0|>\n\n<|body_start_1|>\n assert self.ptr < self.batch_size, 'VPGBuffer is full'\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.ptr += 1\n return self.ptr == self.batch_size\n<|end_body_1|>\n\n<|body_start_2|>\n episode = slice(self.episode_start, self.ptr)\n rews = np.append(self.rew_buf[episode], last_val)\n vals = np.append(self.val_buf[episode], last_val)\n deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\n adv = reverse_discount_cumsum(deltas, self.gamma * self.lam)\n self.adv_buf[episode] = adv\n ret = reverse_discount_cumsum(rews, self.gamma)[:-1]\n self.ret_buf[episode] = ret\n self.episode_start = self.ptr\n<|end_body_2|>\n\n<|body_start_3|>\n assert self.ptr == self.batch_size, 'VPGBuffer batch not complete'\n self.ptr, self.episode_start = (0, 0)\n adv_mean, adv_std = (np.mean(self.adv_buf), np.std(self.adv_buf))\n self.adv_buf = (self.adv_buf - adv_mean) / (adv_std + self.eps)\n return {'observations': jax.device_put(self.obs_buf), 'actions': jax.device_put(self.act_buf), 'returns': jax.device_put(self.ret_buf), 'advantages': jax.device_put(self.ret_buf)}\n<|end_body_3|>\n", "class_docstring": "A buffer that stores (observation, action, reward, state value) tuples collected while interacting with environment and produces a (observations, actions, returns, advantages) batch when full. It uses the General Advantage Estimation (GAE) method to compute advantages.", "class_name": "GAEBuffer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GAEBuffer:\n \"\"\"A buffer that stores (observation, action, reward, state value) tuples collected while interacting with environment and produces a (observations, actions, returns, advantages) batch when full. It uses the General Advantage Estimation (GAE) method to compute advantages.\"\"\"\n\n def __init__(self, batch_size, obs, act, gamma=0.99, lam=0.95, eps=1e-08):\n \"\"\"Constructs the buffer. Args: batch_size: Number of samples output batch. obs: Dummy observation that is used to infer shape. act: Dummy action that is used to infor shape. gamma: GAE's gamma parameter. lam: GAE's lamba parameter. eps: Scalar added to denominators to prevent instabilities.\"\"\"\n <|body_0|>\n\n def store(self, obs, act, rew, val):\n \"\"\"Stores a timestep in the buffer. Args: obs: Observation at the timestep. act: Action taken at the timestep. rew: Reward received at the timestep. val: Estimated value of the current state. Returns: True if the buffer is full, False otherwise.\"\"\"\n <|body_1|>\n\n def end_episode(self, last_val=0):\n \"\"\"Ends the ongoing episode and calculates its return and advantage. Args: last_val: Value of the last state of the episode, useful to bootstrap estimates when the episode was cut short.\"\"\"\n <|body_2|>\n\n def get_batch(self):\n \"\"\"Returns the batch collected on the buffer. Assumes that the buffer is full and that the end_episode method was called immediately before. Returns: A batch containing observations, actions, returns and advantages. Raises: AssertionError: when called on buffer that is not full.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obs = np.array(obs)\n act = np.array(act)\n self.obs_buf = np.zeros(add_batch_dim(batch_size, obs.shape))\n self.act_buf = np.zeros(add_batch_dim(batch_size, act.shape))\n self.rew_buf = np.zeros((batch_size,))\n self.ret_buf = np.zeros((batch_size,))\n self.val_buf = np.zeros((batch_size,))\n self.adv_buf = np.zeros((batch_size,))\n self.gamma, self.lam, self.eps = (gamma, lam, eps)\n self.ptr, self.episode_start, self.batch_size = (0, 0, batch_size)\n<|end_body_0|>\n\n<|body_start_1|>\n assert self.ptr < self.batch_size, 'VPGBuffer is full'\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.ptr += 1\n return self.ptr == self.batch_size\n<|end_body_1|>\n\n<|body_start_2|>\n episode = slice(self.episode_start, self.ptr)\n rews = np.append(self.rew_buf[episode], last_val)\n vals = np.append(self.val_buf[episode], last_val)\n deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\n adv = reverse_discount_cumsum(deltas, self.gamma * self.lam)\n self.adv_buf[episode] = adv\n ret = reverse_discount_cumsum(rews, self.gamma)[:-1]\n self.ret_buf[episode] = ret\n self.episode_start = self.ptr\n<|end_body_2|>\n\n<|body_start_3|>\n assert self.ptr == self.batch_size, 'VPGBuffer batch not complete'\n self.ptr, self.episode_start = (0, 0)\n adv_mean, adv_std = (np.mean(self.adv_buf), np.std(self.adv_buf))\n self.adv_buf = (self.adv_buf - adv_mean) / (adv_std + self.eps)\n return {'observations': jax.device_put(self.obs_buf), 'actions': jax.device_put(self.act_buf), 'returns': jax.device_put(self.ret_buf), 'advantages': jax.device_put(self.ret_buf)}\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000069", "length_bytes": 4069, "license_type": "permissive", "methods": [{"docstring": "Constructs the buffer. Args: batch_size: Number of samples output batch. obs: Dummy observation that is used to infer shape. act: Dummy action that is used to infor shape. gamma: GAE's gamma parameter. lam: GAE's lamba parameter. eps: Scalar added to denominators to prevent instabilities.", "name": "__init__", "signature": "def __init__(self, batch_size, obs, act, gamma=0.99, lam=0.95, eps=1e-08)"}, {"docstring": "Stores a timestep in the buffer. Args: obs: Observation at the timestep. act: Action taken at the timestep. rew: Reward received at the timestep. val: Estimated value of the current state. Returns: True if the buffer is full, False otherwise.", "name": "store", "signature": "def store(self, obs, act, rew, val)"}, {"docstring": "Ends the ongoing episode and calculates its return and advantage. Args: last_val: Value of the last state of the episode, useful to bootstrap estimates when the episode was cut short.", "name": "end_episode", "signature": "def end_episode(self, last_val=0)"}, {"docstring": "Returns the batch collected on the buffer. Assumes that the buffer is full and that the end_episode method was called immediately before. Returns: A batch containing observations, actions, returns and advantages. Raises: AssertionError: when called on buffer that is not full.", "name": "get_batch", "signature": "def get_batch(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_047739", "prompt": "Implement the Python class `GAEBuffer` described below.\n\nClass description:\nA buffer that stores (observation, action, reward, state value) tuples collected while interacting with environment and produces a (observations, actions, returns, advantages) batch when full. It uses the General Advantage Estimation (GAE) method to compute advantages.\n\nMethod signatures and docstrings:\n- def __init__(self, batch_size, obs, act, gamma=0.99, lam=0.95, eps=1e-08): Constructs the buffer. Args: batch_size: Number of samples output batch. obs: Dummy observation that is used to infer shape. act: Dummy action that is used to infor shape. gamma: GAE's gamma parameter. lam: GAE's lamba parameter. eps: Scalar added to denominators to prevent instabilities.\n- def store(self, obs, act, rew, val): Stores a timestep in the buffer. Args: obs: Observation at the timestep. act: Action taken at the timestep. rew: Reward received at the timestep. val: Estimated value of the current state. Returns: True if the buffer is full, False otherwise.\n- def end_episode(self, last_val=0): Ends the ongoing episode and calculates its return and advantage. Args: last_val: Value of the last state of the episode, useful to bootstrap estimates when the episode was cut short.\n- def get_batch(self): Returns the batch collected on the buffer. Assumes that the buffer is full and that the end_episode method was called immediately before. Returns: A batch containing observations, actions, returns and advantages. Raises: AssertionError: when called on buffer that is not full.", "prompted_full_text": "Implement the Python class `GAEBuffer` described below.\n\nClass description:\nA buffer that stores (observation, action, reward, state value) tuples collected while interacting with environment and produces a (observations, actions, returns, advantages) batch when full. It uses the General Advantage Estimation (GAE) method to compute advantages.\n\nMethod signatures and docstrings:\n- def __init__(self, batch_size, obs, act, gamma=0.99, lam=0.95, eps=1e-08): Constructs the buffer. Args: batch_size: Number of samples output batch. obs: Dummy observation that is used to infer shape. act: Dummy action that is used to infor shape. gamma: GAE's gamma parameter. lam: GAE's lamba parameter. eps: Scalar added to denominators to prevent instabilities.\n- def store(self, obs, act, rew, val): Stores a timestep in the buffer. Args: obs: Observation at the timestep. act: Action taken at the timestep. rew: Reward received at the timestep. val: Estimated value of the current state. Returns: True if the buffer is full, False otherwise.\n- def end_episode(self, last_val=0): Ends the ongoing episode and calculates its return and advantage. Args: last_val: Value of the last state of the episode, useful to bootstrap estimates when the episode was cut short.\n- def get_batch(self): Returns the batch collected on the buffer. Assumes that the buffer is full and that the end_episode method was called immediately before. Returns: A batch containing observations, actions, returns and advantages. Raises: AssertionError: when called on buffer that is not full.\n\n<|skeleton|>\nclass GAEBuffer:\n \"\"\"A buffer that stores (observation, action, reward, state value) tuples collected while interacting with environment and produces a (observations, actions, returns, advantages) batch when full. It uses the General Advantage Estimation (GAE) method to compute advantages.\"\"\"\n\n def __init__(self, batch_size, obs, act, gamma=0.99, lam=0.95, eps=1e-08):\n \"\"\"Constructs the buffer. Args: batch_size: Number of samples output batch. obs: Dummy observation that is used to infer shape. act: Dummy action that is used to infor shape. gamma: GAE's gamma parameter. lam: GAE's lamba parameter. eps: Scalar added to denominators to prevent instabilities.\"\"\"\n <|body_0|>\n\n def store(self, obs, act, rew, val):\n \"\"\"Stores a timestep in the buffer. Args: obs: Observation at the timestep. act: Action taken at the timestep. rew: Reward received at the timestep. val: Estimated value of the current state. Returns: True if the buffer is full, False otherwise.\"\"\"\n <|body_1|>\n\n def end_episode(self, last_val=0):\n \"\"\"Ends the ongoing episode and calculates its return and advantage. Args: last_val: Value of the last state of the episode, useful to bootstrap estimates when the episode was cut short.\"\"\"\n <|body_2|>\n\n def get_batch(self):\n \"\"\"Returns the batch collected on the buffer. Assumes that the buffer is full and that the end_episode method was called immediately before. Returns: A batch containing observations, actions, returns and advantages. Raises: AssertionError: when called on buffer that is not full.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obs = np.array(obs)\n act = np.array(act)\n self.obs_buf = np.zeros(add_batch_dim(batch_size, obs.shape))\n self.act_buf = np.zeros(add_batch_dim(batch_size, act.shape))\n self.rew_buf = np.zeros((batch_size,))\n self.ret_buf = np.zeros((batch_size,))\n self.val_buf = np.zeros((batch_size,))\n self.adv_buf = np.zeros((batch_size,))\n self.gamma, self.lam, self.eps = (gamma, lam, eps)\n self.ptr, self.episode_start, self.batch_size = (0, 0, batch_size)\n<|end_body_0|>\n\n<|body_start_1|>\n assert self.ptr < self.batch_size, 'VPGBuffer is full'\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.ptr += 1\n return self.ptr == self.batch_size\n<|end_body_1|>\n\n<|body_start_2|>\n episode = slice(self.episode_start, self.ptr)\n rews = np.append(self.rew_buf[episode], last_val)\n vals = np.append(self.val_buf[episode], last_val)\n deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\n adv = reverse_discount_cumsum(deltas, self.gamma * self.lam)\n self.adv_buf[episode] = adv\n ret = reverse_discount_cumsum(rews, self.gamma)[:-1]\n self.ret_buf[episode] = ret\n self.episode_start = self.ptr\n<|end_body_2|>\n\n<|body_start_3|>\n assert self.ptr == self.batch_size, 'VPGBuffer batch not complete'\n self.ptr, self.episode_start = (0, 0)\n adv_mean, adv_std = (np.mean(self.adv_buf), np.std(self.adv_buf))\n self.adv_buf = (self.adv_buf - adv_mean) / (adv_std + self.eps)\n return {'observations': jax.device_put(self.obs_buf), 'actions': jax.device_put(self.act_buf), 'returns': jax.device_put(self.ret_buf), 'advantages': jax.device_put(self.ret_buf)}\n<|end_body_3|>\n", "revision_id": "98a19b23abca1f3f5933c1b7178ed34f392de8dd", "skeleton": "<|skeleton|>\nclass GAEBuffer:\n \"\"\"A buffer that stores (observation, action, reward, state value) tuples collected while interacting with environment and produces a (observations, actions, returns, advantages) batch when full. It uses the General Advantage Estimation (GAE) method to compute advantages.\"\"\"\n\n def __init__(self, batch_size, obs, act, gamma=0.99, lam=0.95, eps=1e-08):\n \"\"\"Constructs the buffer. Args: batch_size: Number of samples output batch. obs: Dummy observation that is used to infer shape. act: Dummy action that is used to infor shape. gamma: GAE's gamma parameter. lam: GAE's lamba parameter. eps: Scalar added to denominators to prevent instabilities.\"\"\"\n <|body_0|>\n\n def store(self, obs, act, rew, val):\n \"\"\"Stores a timestep in the buffer. Args: obs: Observation at the timestep. act: Action taken at the timestep. rew: Reward received at the timestep. val: Estimated value of the current state. Returns: True if the buffer is full, False otherwise.\"\"\"\n <|body_1|>\n\n def end_episode(self, last_val=0):\n \"\"\"Ends the ongoing episode and calculates its return and advantage. Args: last_val: Value of the last state of the episode, useful to bootstrap estimates when the episode was cut short.\"\"\"\n <|body_2|>\n\n def get_batch(self):\n \"\"\"Returns the batch collected on the buffer. Assumes that the buffer is full and that the end_episode method was called immediately before. Returns: A batch containing observations, actions, returns and advantages. Raises: AssertionError: when called on buffer that is not full.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GAEBuffer:\n \"\"\"A buffer that stores (observation, action, reward, state value) tuples collected while interacting with environment and produces a (observations, actions, returns, advantages) batch when full. It uses the General Advantage Estimation (GAE) method to compute advantages.\"\"\"\n\n def __init__(self, batch_size, obs, act, gamma=0.99, lam=0.95, eps=1e-08):\n \"\"\"Constructs the buffer. Args: batch_size: Number of samples output batch. obs: Dummy observation that is used to infer shape. act: Dummy action that is used to infor shape. gamma: GAE's gamma parameter. lam: GAE's lamba parameter. eps: Scalar added to denominators to prevent instabilities.\"\"\"\n obs = np.array(obs)\n act = np.array(act)\n self.obs_buf = np.zeros(add_batch_dim(batch_size, obs.shape))\n self.act_buf = np.zeros(add_batch_dim(batch_size, act.shape))\n self.rew_buf = np.zeros((batch_size,))\n self.ret_buf = np.zeros((batch_size,))\n self.val_buf = np.zeros((batch_size,))\n self.adv_buf = np.zeros((batch_size,))\n self.gamma, self.lam, self.eps = (gamma, lam, eps)\n self.ptr, self.episode_start, self.batch_size = (0, 0, batch_size)\n\n def store(self, obs, act, rew, val):\n \"\"\"Stores a timestep in the buffer. Args: obs: Observation at the timestep. act: Action taken at the timestep. rew: Reward received at the timestep. val: Estimated value of the current state. Returns: True if the buffer is full, False otherwise.\"\"\"\n assert self.ptr < self.batch_size, 'VPGBuffer is full'\n self.obs_buf[self.ptr] = obs\n self.act_buf[self.ptr] = act\n self.rew_buf[self.ptr] = rew\n self.val_buf[self.ptr] = val\n self.ptr += 1\n return self.ptr == self.batch_size\n\n def end_episode(self, last_val=0):\n \"\"\"Ends the ongoing episode and calculates its return and advantage. Args: last_val: Value of the last state of the episode, useful to bootstrap estimates when the episode was cut short.\"\"\"\n episode = slice(self.episode_start, self.ptr)\n rews = np.append(self.rew_buf[episode], last_val)\n vals = np.append(self.val_buf[episode], last_val)\n deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1]\n adv = reverse_discount_cumsum(deltas, self.gamma * self.lam)\n self.adv_buf[episode] = adv\n ret = reverse_discount_cumsum(rews, self.gamma)[:-1]\n self.ret_buf[episode] = ret\n self.episode_start = self.ptr\n\n def get_batch(self):\n \"\"\"Returns the batch collected on the buffer. Assumes that the buffer is full and that the end_episode method was called immediately before. Returns: A batch containing observations, actions, returns and advantages. Raises: AssertionError: when called on buffer that is not full.\"\"\"\n assert self.ptr == self.batch_size, 'VPGBuffer batch not complete'\n self.ptr, self.episode_start = (0, 0)\n adv_mean, adv_std = (np.mean(self.adv_buf), np.std(self.adv_buf))\n self.adv_buf = (self.adv_buf - adv_mean) / (adv_std + self.eps)\n return {'observations': jax.device_put(self.obs_buf), 'actions': jax.device_put(self.act_buf), 'returns': jax.device_put(self.ret_buf), 'advantages': jax.device_put(self.ret_buf)}\n", "source": "the_stack_v2_python_sparse", "source_path": "jax_baselines/vpg/buffer.py", "source_repo": "mmcenta/jax-baselines", "split": "test", "star_events_count": 0} {"blob_id": "feeb2dffb2ece069fe6fdedd8b456f797a9b8564", "bodies": ["arr, path = ([], '')\nself.recursion(root, path, arr)\nreturn [col for col in arr if sum(col) == summary]", "if root:\n path += str(root.val) + ','\n if not root.left and (not root.right):\n arr.append([int(node) for node in path.split(',') if node != ''])\n path = ''\n self.recursion(root.left, path, arr)\n self.recursion(root.right, path, arr)"], "bodies_text": "<|body_start_0|>\n arr, path = ([], '')\n self.recursion(root, path, arr)\n return [col for col in arr if sum(col) == summary]\n<|end_body_0|>\n\n<|body_start_1|>\n if root:\n path += str(root.val) + ','\n if not root.left and (not root.right):\n arr.append([int(node) for node in path.split(',') if node != ''])\n path = ''\n self.recursion(root.left, path, arr)\n self.recursion(root.right, path, arr)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def pathSum(self, root, summary):\n \"\"\":type root: TreeNode :type summary: int :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def recursion(self, root, path, arr):\n \"\"\":param root: :param path: :param arr: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n arr, path = ([], '')\n self.recursion(root, path, arr)\n return [col for col in arr if sum(col) == summary]\n<|end_body_0|>\n\n<|body_start_1|>\n if root:\n path += str(root.val) + ','\n if not root.left and (not root.right):\n arr.append([int(node) for node in path.split(',') if node != ''])\n path = ''\n self.recursion(root.left, path, arr)\n self.recursion(root.right, path, arr)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000070", "length_bytes": 915, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :type summary: int :rtype: List[List[int]]", "name": "pathSum", "signature": "def pathSum(self, root, summary)"}, {"docstring": ":param root: :param path: :param arr: :return:", "name": "recursion", "signature": "def recursion(self, root, path, arr)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_025825", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pathSum(self, root, summary): :type root: TreeNode :type summary: int :rtype: List[List[int]]\n- def recursion(self, root, path, arr): :param root: :param path: :param arr: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def pathSum(self, root, summary): :type root: TreeNode :type summary: int :rtype: List[List[int]]\n- def recursion(self, root, path, arr): :param root: :param path: :param arr: :return:\n\n<|skeleton|>\nclass Solution:\n\n def pathSum(self, root, summary):\n \"\"\":type root: TreeNode :type summary: int :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def recursion(self, root, path, arr):\n \"\"\":param root: :param path: :param arr: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n arr, path = ([], '')\n self.recursion(root, path, arr)\n return [col for col in arr if sum(col) == summary]\n<|end_body_0|>\n\n<|body_start_1|>\n if root:\n path += str(root.val) + ','\n if not root.left and (not root.right):\n arr.append([int(node) for node in path.split(',') if node != ''])\n path = ''\n self.recursion(root.left, path, arr)\n self.recursion(root.right, path, arr)\n<|end_body_1|>\n", "revision_id": "b38cc7d24c85ef6e7a1342d7ae0054f6c663e600", "skeleton": "<|skeleton|>\nclass Solution:\n\n def pathSum(self, root, summary):\n \"\"\":type root: TreeNode :type summary: int :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def recursion(self, root, path, arr):\n \"\"\":param root: :param path: :param arr: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def pathSum(self, root, summary):\n \"\"\":type root: TreeNode :type summary: int :rtype: List[List[int]]\"\"\"\n arr, path = ([], '')\n self.recursion(root, path, arr)\n return [col for col in arr if sum(col) == summary]\n\n def recursion(self, root, path, arr):\n \"\"\":param root: :param path: :param arr: :return:\"\"\"\n if root:\n path += str(root.val) + ','\n if not root.left and (not root.right):\n arr.append([int(node) for node in path.split(',') if node != ''])\n path = ''\n self.recursion(root.left, path, arr)\n self.recursion(root.right, path, arr)\n", "source": "the_stack_v2_python_sparse", "source_path": "101~200/113.py", "source_repo": "strategist922/leetcode-5", "split": "test", "star_events_count": 0} {"blob_id": "4c0eb311172c0b33351f8ec64c5cc14639190a1f", "bodies": ["self.gamma = float(input())\nself.kernel = int(input())\nself.mode = str(input()).strip()", "mask = np.sort(mask.flatten())\nmiddle = round(mask.shape[0] / 2)\nquart_middle = round(mask.shape[0] / 4)\nreturn mask[middle + quart_middle] - mask[quart_middle]", "if self.mode == 'average':\n return np.std(mask)\nelif self.mode == 'robust':\n return self.iqr(mask)\nelse:\n raise ValueError('Dispersion mode is unknown.')", "if self.mode == 'average':\n return np.mean(mask)\nelif self.mode == 'robust':\n return np.median(mask)\nelse:\n raise ValueError('Dispersion mode is unknown.')", "dispn = img[0:floor(img.shape[0] / 6) - 1, 0:floor(img.shape[1] / 6) - 1]\ndispn = self.calc_disp(dispn)\ndispn = 1 if dispn == 0 else dispn\npadding = int((self.kernel - 1) / 2)\nnew_img = np.copy(img)\nfor i in range(padding, img.shape[0] - padding):\n for j in range(padding, img.shape[1] - padding):\n mask = img[i - padding:i + padding + 1, j - padding:j + padding + 1]\n centrl = self.calc_centrl(mask)\n displ = self.calc_disp(mask)\n displ = dispn if displ == 0 else displ\n if dispn > displ:\n displ = dispn\n new_img[i][j] = img[i][j] - self.gamma * dispn / displ * (img[i][j] - centrl)\nreturn new_img"], "bodies_text": "<|body_start_0|>\n self.gamma = float(input())\n self.kernel = int(input())\n self.mode = str(input()).strip()\n<|end_body_0|>\n\n<|body_start_1|>\n mask = np.sort(mask.flatten())\n middle = round(mask.shape[0] / 2)\n quart_middle = round(mask.shape[0] / 4)\n return mask[middle + quart_middle] - mask[quart_middle]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.mode == 'average':\n return np.std(mask)\n elif self.mode == 'robust':\n return self.iqr(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n<|end_body_2|>\n\n<|body_start_3|>\n if self.mode == 'average':\n return np.mean(mask)\n elif self.mode == 'robust':\n return np.median(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n<|end_body_3|>\n\n<|body_start_4|>\n dispn = img[0:floor(img.shape[0] / 6) - 1, 0:floor(img.shape[1] / 6) - 1]\n dispn = self.calc_disp(dispn)\n dispn = 1 if dispn == 0 else dispn\n padding = int((self.kernel - 1) / 2)\n new_img = np.copy(img)\n for i in range(padding, img.shape[0] - padding):\n for j in range(padding, img.shape[1] - padding):\n mask = img[i - padding:i + padding + 1, j - padding:j + padding + 1]\n centrl = self.calc_centrl(mask)\n displ = self.calc_disp(mask)\n displ = dispn if displ == 0 else displ\n if dispn > displ:\n displ = dispn\n new_img[i][j] = img[i][j] - self.gamma * dispn / displ * (img[i][j] - centrl)\n return new_img\n<|end_body_4|>\n", "class_docstring": "", "class_name": "DenoisingFilter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DenoisingFilter:\n\n def __init__(self):\n \"\"\"Initializes main parameters\"\"\"\n <|body_0|>\n\n def iqr(self, mask):\n \"\"\"Computes the interquartile range @param mask: set of reference pixels\"\"\"\n <|body_1|>\n\n def calc_disp(self, mask):\n \"\"\"Calculates the dispersion measure @param mask: set of reference pixels\"\"\"\n <|body_2|>\n\n def calc_centrl(self, mask):\n \"\"\"Calculates the centrality measure @param mask: set of reference pixels\"\"\"\n <|body_3|>\n\n def restore_img(self, img):\n \"\"\"Removes noise from the given image based on the dispersion of a sample of the image and the centrality measure of each pixel. @param img: numpy array with image to be filtered\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.gamma = float(input())\n self.kernel = int(input())\n self.mode = str(input()).strip()\n<|end_body_0|>\n\n<|body_start_1|>\n mask = np.sort(mask.flatten())\n middle = round(mask.shape[0] / 2)\n quart_middle = round(mask.shape[0] / 4)\n return mask[middle + quart_middle] - mask[quart_middle]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.mode == 'average':\n return np.std(mask)\n elif self.mode == 'robust':\n return self.iqr(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n<|end_body_2|>\n\n<|body_start_3|>\n if self.mode == 'average':\n return np.mean(mask)\n elif self.mode == 'robust':\n return np.median(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n<|end_body_3|>\n\n<|body_start_4|>\n dispn = img[0:floor(img.shape[0] / 6) - 1, 0:floor(img.shape[1] / 6) - 1]\n dispn = self.calc_disp(dispn)\n dispn = 1 if dispn == 0 else dispn\n padding = int((self.kernel - 1) / 2)\n new_img = np.copy(img)\n for i in range(padding, img.shape[0] - padding):\n for j in range(padding, img.shape[1] - padding):\n mask = img[i - padding:i + padding + 1, j - padding:j + padding + 1]\n centrl = self.calc_centrl(mask)\n displ = self.calc_disp(mask)\n displ = dispn if displ == 0 else displ\n if dispn > displ:\n displ = dispn\n new_img[i][j] = img[i][j] - self.gamma * dispn / displ * (img[i][j] - centrl)\n return new_img\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000071", "length_bytes": 10085, "license_type": "no_license", "methods": [{"docstring": "Initializes main parameters", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Computes the interquartile range @param mask: set of reference pixels", "name": "iqr", "signature": "def iqr(self, mask)"}, {"docstring": "Calculates the dispersion measure @param mask: set of reference pixels", "name": "calc_disp", "signature": "def calc_disp(self, mask)"}, {"docstring": "Calculates the centrality measure @param mask: set of reference pixels", "name": "calc_centrl", "signature": "def calc_centrl(self, mask)"}, {"docstring": "Removes noise from the given image based on the dispersion of a sample of the image and the centrality measure of each pixel. @param img: numpy array with image to be filtered", "name": "restore_img", "signature": "def restore_img(self, img)"}], "n_methods": 5, "prompt": "Implement the Python class `DenoisingFilter` described below.\n\nClass description:\nImplement the DenoisingFilter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initializes main parameters\n- def iqr(self, mask): Computes the interquartile range @param mask: set of reference pixels\n- def calc_disp(self, mask): Calculates the dispersion measure @param mask: set of reference pixels\n- def calc_centrl(self, mask): Calculates the centrality measure @param mask: set of reference pixels\n- def restore_img(self, img): Removes noise from the given image based on the dispersion of a sample of the image and the centrality measure of each pixel. @param img: numpy array with image to be filtered", "prompted_full_text": "Implement the Python class `DenoisingFilter` described below.\n\nClass description:\nImplement the DenoisingFilter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initializes main parameters\n- def iqr(self, mask): Computes the interquartile range @param mask: set of reference pixels\n- def calc_disp(self, mask): Calculates the dispersion measure @param mask: set of reference pixels\n- def calc_centrl(self, mask): Calculates the centrality measure @param mask: set of reference pixels\n- def restore_img(self, img): Removes noise from the given image based on the dispersion of a sample of the image and the centrality measure of each pixel. @param img: numpy array with image to be filtered\n\n<|skeleton|>\nclass DenoisingFilter:\n\n def __init__(self):\n \"\"\"Initializes main parameters\"\"\"\n <|body_0|>\n\n def iqr(self, mask):\n \"\"\"Computes the interquartile range @param mask: set of reference pixels\"\"\"\n <|body_1|>\n\n def calc_disp(self, mask):\n \"\"\"Calculates the dispersion measure @param mask: set of reference pixels\"\"\"\n <|body_2|>\n\n def calc_centrl(self, mask):\n \"\"\"Calculates the centrality measure @param mask: set of reference pixels\"\"\"\n <|body_3|>\n\n def restore_img(self, img):\n \"\"\"Removes noise from the given image based on the dispersion of a sample of the image and the centrality measure of each pixel. @param img: numpy array with image to be filtered\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.gamma = float(input())\n self.kernel = int(input())\n self.mode = str(input()).strip()\n<|end_body_0|>\n\n<|body_start_1|>\n mask = np.sort(mask.flatten())\n middle = round(mask.shape[0] / 2)\n quart_middle = round(mask.shape[0] / 4)\n return mask[middle + quart_middle] - mask[quart_middle]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.mode == 'average':\n return np.std(mask)\n elif self.mode == 'robust':\n return self.iqr(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n<|end_body_2|>\n\n<|body_start_3|>\n if self.mode == 'average':\n return np.mean(mask)\n elif self.mode == 'robust':\n return np.median(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n<|end_body_3|>\n\n<|body_start_4|>\n dispn = img[0:floor(img.shape[0] / 6) - 1, 0:floor(img.shape[1] / 6) - 1]\n dispn = self.calc_disp(dispn)\n dispn = 1 if dispn == 0 else dispn\n padding = int((self.kernel - 1) / 2)\n new_img = np.copy(img)\n for i in range(padding, img.shape[0] - padding):\n for j in range(padding, img.shape[1] - padding):\n mask = img[i - padding:i + padding + 1, j - padding:j + padding + 1]\n centrl = self.calc_centrl(mask)\n displ = self.calc_disp(mask)\n displ = dispn if displ == 0 else displ\n if dispn > displ:\n displ = dispn\n new_img[i][j] = img[i][j] - self.gamma * dispn / displ * (img[i][j] - centrl)\n return new_img\n<|end_body_4|>\n", "revision_id": "2a4adef88508c6d9b134920f758044dece09a58e", "skeleton": "<|skeleton|>\nclass DenoisingFilter:\n\n def __init__(self):\n \"\"\"Initializes main parameters\"\"\"\n <|body_0|>\n\n def iqr(self, mask):\n \"\"\"Computes the interquartile range @param mask: set of reference pixels\"\"\"\n <|body_1|>\n\n def calc_disp(self, mask):\n \"\"\"Calculates the dispersion measure @param mask: set of reference pixels\"\"\"\n <|body_2|>\n\n def calc_centrl(self, mask):\n \"\"\"Calculates the centrality measure @param mask: set of reference pixels\"\"\"\n <|body_3|>\n\n def restore_img(self, img):\n \"\"\"Removes noise from the given image based on the dispersion of a sample of the image and the centrality measure of each pixel. @param img: numpy array with image to be filtered\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DenoisingFilter:\n def __init__(self):\n \"\"\"Initializes main parameters\"\"\"\n self.gamma = float(input())\n self.kernel = int(input())\n self.mode = str(input()).strip()\n\n def iqr(self, mask):\n \"\"\"Computes the interquartile range @param mask: set of reference pixels\"\"\"\n mask = np.sort(mask.flatten())\n middle = round(mask.shape[0] / 2)\n quart_middle = round(mask.shape[0] / 4)\n return mask[middle + quart_middle] - mask[quart_middle]\n\n def calc_disp(self, mask):\n \"\"\"Calculates the dispersion measure @param mask: set of reference pixels\"\"\"\n if self.mode == 'average':\n return np.std(mask)\n elif self.mode == 'robust':\n return self.iqr(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n\n def calc_centrl(self, mask):\n \"\"\"Calculates the centrality measure @param mask: set of reference pixels\"\"\"\n if self.mode == 'average':\n return np.mean(mask)\n elif self.mode == 'robust':\n return np.median(mask)\n else:\n raise ValueError('Dispersion mode is unknown.')\n\n def restore_img(self, img):\n \"\"\"Removes noise from the given image based on the dispersion of a sample of the image and the centrality measure of each pixel. @param img: numpy array with image to be filtered\"\"\"\n dispn = img[0:floor(img.shape[0] / 6) - 1, 0:floor(img.shape[1] / 6) - 1]\n dispn = self.calc_disp(dispn)\n dispn = 1 if dispn == 0 else dispn\n padding = int((self.kernel - 1) / 2)\n new_img = np.copy(img)\n for i in range(padding, img.shape[0] - padding):\n for j in range(padding, img.shape[1] - padding):\n mask = img[i - padding:i + padding + 1, j - padding:j + padding + 1]\n centrl = self.calc_centrl(mask)\n displ = self.calc_disp(mask)\n displ = dispn if displ == 0 else displ\n if dispn > displ:\n displ = dispn\n new_img[i][j] = img[i][j] - self.gamma * dispn / displ * (img[i][j] - centrl)\n return new_img\n", "source": "the_stack_v2_python_sparse", "source_path": "SCC5830/ex3/src/imagerestoration.py", "source_repo": "damaresende/USP", "split": "test", "star_events_count": 0} {"blob_id": "952817113263ca6f91c6a65623c6770de889206d", "bodies": ["identifier = resource.identifier\ncontainer_manager = self.application.container_manager\ncontainer = (yield container_manager.find_container(url_id=identifier))\nif not container:\n raise exceptions.NotFound()\ntry:\n yield self.application.reverse_proxy.unregister(container.urlpath)\nexcept Exception:\n self.log.exception('Could not remove reverse proxy for id {}'.format(identifier))\ntry:\n yield container_manager.stop_and_remove_container(container.docker_id)\nexcept Exception:\n self.log.exception('Could not stop and remove container for id {}'.format(identifier))", "manager = self.application.container_manager\ncontainers = (yield manager.find_containers())\nitems = []\nfor c in containers:\n item = Container(identifier=c.url_id)\n item.fill(c)\n items.append(item)\nitems_response.set(items)"], "bodies_text": "<|body_start_0|>\n identifier = resource.identifier\n container_manager = self.application.container_manager\n container = (yield container_manager.find_container(url_id=identifier))\n if not container:\n raise exceptions.NotFound()\n try:\n yield self.application.reverse_proxy.unregister(container.urlpath)\n except Exception:\n self.log.exception('Could not remove reverse proxy for id {}'.format(identifier))\n try:\n yield container_manager.stop_and_remove_container(container.docker_id)\n except Exception:\n self.log.exception('Could not stop and remove container for id {}'.format(identifier))\n<|end_body_0|>\n\n<|body_start_1|>\n manager = self.application.container_manager\n containers = (yield manager.find_containers())\n items = []\n for c in containers:\n item = Container(identifier=c.url_id)\n item.fill(c)\n items.append(item)\n items_response.set(items)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ContainerHandler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ContainerHandler:\n\n def delete(self, resource, **kwargs):\n \"\"\"Stop the container.\"\"\"\n <|body_0|>\n\n def items(self, items_response, **kwargs):\n \"\"\"Get all the currently running containers.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n identifier = resource.identifier\n container_manager = self.application.container_manager\n container = (yield container_manager.find_container(url_id=identifier))\n if not container:\n raise exceptions.NotFound()\n try:\n yield self.application.reverse_proxy.unregister(container.urlpath)\n except Exception:\n self.log.exception('Could not remove reverse proxy for id {}'.format(identifier))\n try:\n yield container_manager.stop_and_remove_container(container.docker_id)\n except Exception:\n self.log.exception('Could not stop and remove container for id {}'.format(identifier))\n<|end_body_0|>\n\n<|body_start_1|>\n manager = self.application.container_manager\n containers = (yield manager.find_containers())\n items = []\n for c in containers:\n item = Container(identifier=c.url_id)\n item.fill(c)\n items.append(item)\n items_response.set(items)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000072", "length_bytes": 2491, "license_type": "permissive", "methods": [{"docstring": "Stop the container.", "name": "delete", "signature": "def delete(self, resource, **kwargs)"}, {"docstring": "Get all the currently running containers.", "name": "items", "signature": "def items(self, items_response, **kwargs)"}], "n_methods": 2, "prompt": "Implement the Python class `ContainerHandler` described below.\n\nClass description:\nImplement the ContainerHandler class.\n\nMethod signatures and docstrings:\n- def delete(self, resource, **kwargs): Stop the container.\n- def items(self, items_response, **kwargs): Get all the currently running containers.", "prompted_full_text": "Implement the Python class `ContainerHandler` described below.\n\nClass description:\nImplement the ContainerHandler class.\n\nMethod signatures and docstrings:\n- def delete(self, resource, **kwargs): Stop the container.\n- def items(self, items_response, **kwargs): Get all the currently running containers.\n\n<|skeleton|>\nclass ContainerHandler:\n\n def delete(self, resource, **kwargs):\n \"\"\"Stop the container.\"\"\"\n <|body_0|>\n\n def items(self, items_response, **kwargs):\n \"\"\"Get all the currently running containers.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n identifier = resource.identifier\n container_manager = self.application.container_manager\n container = (yield container_manager.find_container(url_id=identifier))\n if not container:\n raise exceptions.NotFound()\n try:\n yield self.application.reverse_proxy.unregister(container.urlpath)\n except Exception:\n self.log.exception('Could not remove reverse proxy for id {}'.format(identifier))\n try:\n yield container_manager.stop_and_remove_container(container.docker_id)\n except Exception:\n self.log.exception('Could not stop and remove container for id {}'.format(identifier))\n<|end_body_0|>\n\n<|body_start_1|>\n manager = self.application.container_manager\n containers = (yield manager.find_containers())\n items = []\n for c in containers:\n item = Container(identifier=c.url_id)\n item.fill(c)\n items.append(item)\n items_response.set(items)\n<|end_body_1|>\n", "revision_id": "d7130d3d7dd2fef3b15d26c4438e7b5aff6fe335", "skeleton": "<|skeleton|>\nclass ContainerHandler:\n\n def delete(self, resource, **kwargs):\n \"\"\"Stop the container.\"\"\"\n <|body_0|>\n\n def items(self, items_response, **kwargs):\n \"\"\"Get all the currently running containers.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ContainerHandler:\n def delete(self, resource, **kwargs):\n \"\"\"Stop the container.\"\"\"\n identifier = resource.identifier\n container_manager = self.application.container_manager\n container = (yield container_manager.find_container(url_id=identifier))\n if not container:\n raise exceptions.NotFound()\n try:\n yield self.application.reverse_proxy.unregister(container.urlpath)\n except Exception:\n self.log.exception('Could not remove reverse proxy for id {}'.format(identifier))\n try:\n yield container_manager.stop_and_remove_container(container.docker_id)\n except Exception:\n self.log.exception('Could not stop and remove container for id {}'.format(identifier))\n\n def items(self, items_response, **kwargs):\n \"\"\"Get all the currently running containers.\"\"\"\n manager = self.application.container_manager\n containers = (yield manager.find_containers())\n items = []\n for c in containers:\n item = Container(identifier=c.url_id)\n item.fill(c)\n items.append(item)\n items_response.set(items)\n", "source": "the_stack_v2_python_sparse", "source_path": "remoteappmanager/webapi/admin/container.py", "source_repo": "simphony/simphony-remote", "split": "test", "star_events_count": 7} {"blob_id": "a340863823656cb2e5664bb73bb0fedcdefa7cdd", "bodies": ["n = len(nums)\nones = 0\ntwos = 0\nfor t in nums:\n if t == 1:\n ones += 1\n elif t == 2:\n twos += 1\nnums[:] = [0] * (n - ones - twos) + [1] * ones + [2] * twos", "n = len(nums)\nzero_ptr = 0\nfor i in range(n):\n if nums[i] == 0:\n t = nums[zero_ptr]\n nums[zero_ptr] = nums[i]\n nums[i] = t\n zero_ptr += 1\none_ptr = zero_ptr\nfor i in range(zero_ptr, n):\n if nums[i] == 1:\n t = nums[one_ptr]\n nums[one_ptr] = nums[i]\n nums[i] = t\n one_ptr += 1\nprint(nums[i])"], "bodies_text": "<|body_start_0|>\n n = len(nums)\n ones = 0\n twos = 0\n for t in nums:\n if t == 1:\n ones += 1\n elif t == 2:\n twos += 1\n nums[:] = [0] * (n - ones - twos) + [1] * ones + [2] * twos\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n zero_ptr = 0\n for i in range(n):\n if nums[i] == 0:\n t = nums[zero_ptr]\n nums[zero_ptr] = nums[i]\n nums[i] = t\n zero_ptr += 1\n one_ptr = zero_ptr\n for i in range(zero_ptr, n):\n if nums[i] == 1:\n t = nums[one_ptr]\n nums[one_ptr] = nums[i]\n nums[i] = t\n one_ptr += 1\n print(nums[i])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def sortColors(self, nums: [int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def sortColors(self, nums: [int]) -> None:\n \"\"\"O(n) O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(nums)\n ones = 0\n twos = 0\n for t in nums:\n if t == 1:\n ones += 1\n elif t == 2:\n twos += 1\n nums[:] = [0] * (n - ones - twos) + [1] * ones + [2] * twos\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n zero_ptr = 0\n for i in range(n):\n if nums[i] == 0:\n t = nums[zero_ptr]\n nums[zero_ptr] = nums[i]\n nums[i] = t\n zero_ptr += 1\n one_ptr = zero_ptr\n for i in range(zero_ptr, n):\n if nums[i] == 1:\n t = nums[one_ptr]\n nums[one_ptr] = nums[i]\n nums[i] = t\n one_ptr += 1\n print(nums[i])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000073", "length_bytes": 1169, "license_type": "no_license", "methods": [{"docstring": "Do not return anything, modify nums in-place instead.", "name": "sortColors", "signature": "def sortColors(self, nums: [int]) -> None"}, {"docstring": "O(n) O(1)", "name": "sortColors", "signature": "def sortColors(self, nums: [int]) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000221", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def sortColors(self, nums: [int]) -> None: Do not return anything, modify nums in-place instead.\n- def sortColors(self, nums: [int]) -> None: O(n) O(1)", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def sortColors(self, nums: [int]) -> None: Do not return anything, modify nums in-place instead.\n- def sortColors(self, nums: [int]) -> None: O(n) O(1)\n\n<|skeleton|>\nclass Solution:\n\n def sortColors(self, nums: [int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def sortColors(self, nums: [int]) -> None:\n \"\"\"O(n) O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(nums)\n ones = 0\n twos = 0\n for t in nums:\n if t == 1:\n ones += 1\n elif t == 2:\n twos += 1\n nums[:] = [0] * (n - ones - twos) + [1] * ones + [2] * twos\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n zero_ptr = 0\n for i in range(n):\n if nums[i] == 0:\n t = nums[zero_ptr]\n nums[zero_ptr] = nums[i]\n nums[i] = t\n zero_ptr += 1\n one_ptr = zero_ptr\n for i in range(zero_ptr, n):\n if nums[i] == 1:\n t = nums[one_ptr]\n nums[one_ptr] = nums[i]\n nums[i] = t\n one_ptr += 1\n print(nums[i])\n<|end_body_1|>\n", "revision_id": "26a467dfe8acd8ae4be0cd2784d79eebf09c06ce", "skeleton": "<|skeleton|>\nclass Solution:\n\n def sortColors(self, nums: [int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def sortColors(self, nums: [int]) -> None:\n \"\"\"O(n) O(1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def sortColors(self, nums: [int]) -> None:\n \"\"\"Do not return anything, modify nums in-place instead.\"\"\"\n n = len(nums)\n ones = 0\n twos = 0\n for t in nums:\n if t == 1:\n ones += 1\n elif t == 2:\n twos += 1\n nums[:] = [0] * (n - ones - twos) + [1] * ones + [2] * twos\n\n def sortColors(self, nums: [int]) -> None:\n \"\"\"O(n) O(1)\"\"\"\n n = len(nums)\n zero_ptr = 0\n for i in range(n):\n if nums[i] == 0:\n t = nums[zero_ptr]\n nums[zero_ptr] = nums[i]\n nums[i] = t\n zero_ptr += 1\n one_ptr = zero_ptr\n for i in range(zero_ptr, n):\n if nums[i] == 1:\n t = nums[one_ptr]\n nums[one_ptr] = nums[i]\n nums[i] = t\n one_ptr += 1\n print(nums[i])\n", "source": "the_stack_v2_python_sparse", "source_path": "FuckLeetcode/75. 颜色分类.py", "source_repo": "Alex-Beng/ojs", "split": "test", "star_events_count": 0} {"blob_id": "ea65c601728bb1648b9e47ddd64905bdd914ddbf", "bodies": ["dwi, phasediff = (kwargs.pop('dwi_file'), kwargs.pop('phasediff_file'))\nkwargs['encoding_direction'] = [self.fix_phase_encoding(dwi.get_phase_encoding_direction()), self.fix_phase_encoding(phasediff.get_phase_encoding_direction())]\nkwargs['readout_times'] = [dwi.get_total_readout_time(), phasediff.get_total_readout_time()]\nsuper().__init__(*args, **kwargs)", "for key, value in self.PHASE_ENCODING_DICT.items():\n phase_encoding = phase_encoding.replace(key, value)\nreturn phase_encoding"], "bodies_text": "<|body_start_0|>\n dwi, phasediff = (kwargs.pop('dwi_file'), kwargs.pop('phasediff_file'))\n kwargs['encoding_direction'] = [self.fix_phase_encoding(dwi.get_phase_encoding_direction()), self.fix_phase_encoding(phasediff.get_phase_encoding_direction())]\n kwargs['readout_times'] = [dwi.get_total_readout_time(), phasediff.get_total_readout_time()]\n super().__init__(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n for key, value in self.PHASE_ENCODING_DICT.items():\n phase_encoding = phase_encoding.replace(key, value)\n return phase_encoding\n<|end_body_1|>\n", "class_docstring": "A simple subclass of nipype's :class:`~nipype.interfaces.fsl.TOPUP` interface, tweaking the interface's :meth:`__init__` method to make input specification easier.", "class_name": "TopupWrapper", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TopupWrapper:\n \"\"\"A simple subclass of nipype's :class:`~nipype.interfaces.fsl.TOPUP` interface, tweaking the interface's :meth:`__init__` method to make input specification easier.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets the *encoding_direction* and *readout_times* parameter values using the provided :class:`~django_mri.models.nifti.NIfTI` instances.\"\"\"\n <|body_0|>\n\n def fix_phase_encoding(self, phase_encoding: str) -> str:\n \"\"\"Converts phase encoding values from *i, j, k* to *x, y, z*. Parameters ---------- phase_encoding : str Phase encoding Returns ------- str Coverted phase encoding\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dwi, phasediff = (kwargs.pop('dwi_file'), kwargs.pop('phasediff_file'))\n kwargs['encoding_direction'] = [self.fix_phase_encoding(dwi.get_phase_encoding_direction()), self.fix_phase_encoding(phasediff.get_phase_encoding_direction())]\n kwargs['readout_times'] = [dwi.get_total_readout_time(), phasediff.get_total_readout_time()]\n super().__init__(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n for key, value in self.PHASE_ENCODING_DICT.items():\n phase_encoding = phase_encoding.replace(key, value)\n return phase_encoding\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000074", "length_bytes": 1582, "license_type": "permissive", "methods": [{"docstring": "Sets the *encoding_direction* and *readout_times* parameter values using the provided :class:`~django_mri.models.nifti.NIfTI` instances.", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Converts phase encoding values from *i, j, k* to *x, y, z*. Parameters ---------- phase_encoding : str Phase encoding Returns ------- str Coverted phase encoding", "name": "fix_phase_encoding", "signature": "def fix_phase_encoding(self, phase_encoding: str) -> str"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002390", "prompt": "Implement the Python class `TopupWrapper` described below.\n\nClass description:\nA simple subclass of nipype's :class:`~nipype.interfaces.fsl.TOPUP` interface, tweaking the interface's :meth:`__init__` method to make input specification easier.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Sets the *encoding_direction* and *readout_times* parameter values using the provided :class:`~django_mri.models.nifti.NIfTI` instances.\n- def fix_phase_encoding(self, phase_encoding: str) -> str: Converts phase encoding values from *i, j, k* to *x, y, z*. Parameters ---------- phase_encoding : str Phase encoding Returns ------- str Coverted phase encoding", "prompted_full_text": "Implement the Python class `TopupWrapper` described below.\n\nClass description:\nA simple subclass of nipype's :class:`~nipype.interfaces.fsl.TOPUP` interface, tweaking the interface's :meth:`__init__` method to make input specification easier.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Sets the *encoding_direction* and *readout_times* parameter values using the provided :class:`~django_mri.models.nifti.NIfTI` instances.\n- def fix_phase_encoding(self, phase_encoding: str) -> str: Converts phase encoding values from *i, j, k* to *x, y, z*. Parameters ---------- phase_encoding : str Phase encoding Returns ------- str Coverted phase encoding\n\n<|skeleton|>\nclass TopupWrapper:\n \"\"\"A simple subclass of nipype's :class:`~nipype.interfaces.fsl.TOPUP` interface, tweaking the interface's :meth:`__init__` method to make input specification easier.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets the *encoding_direction* and *readout_times* parameter values using the provided :class:`~django_mri.models.nifti.NIfTI` instances.\"\"\"\n <|body_0|>\n\n def fix_phase_encoding(self, phase_encoding: str) -> str:\n \"\"\"Converts phase encoding values from *i, j, k* to *x, y, z*. Parameters ---------- phase_encoding : str Phase encoding Returns ------- str Coverted phase encoding\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dwi, phasediff = (kwargs.pop('dwi_file'), kwargs.pop('phasediff_file'))\n kwargs['encoding_direction'] = [self.fix_phase_encoding(dwi.get_phase_encoding_direction()), self.fix_phase_encoding(phasediff.get_phase_encoding_direction())]\n kwargs['readout_times'] = [dwi.get_total_readout_time(), phasediff.get_total_readout_time()]\n super().__init__(*args, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n for key, value in self.PHASE_ENCODING_DICT.items():\n phase_encoding = phase_encoding.replace(key, value)\n return phase_encoding\n<|end_body_1|>\n", "revision_id": "5b5ca1b119144d01e526825d2b2a2b87541b4d4a", "skeleton": "<|skeleton|>\nclass TopupWrapper:\n \"\"\"A simple subclass of nipype's :class:`~nipype.interfaces.fsl.TOPUP` interface, tweaking the interface's :meth:`__init__` method to make input specification easier.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets the *encoding_direction* and *readout_times* parameter values using the provided :class:`~django_mri.models.nifti.NIfTI` instances.\"\"\"\n <|body_0|>\n\n def fix_phase_encoding(self, phase_encoding: str) -> str:\n \"\"\"Converts phase encoding values from *i, j, k* to *x, y, z*. Parameters ---------- phase_encoding : str Phase encoding Returns ------- str Coverted phase encoding\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TopupWrapper:\n \"\"\"A simple subclass of nipype's :class:`~nipype.interfaces.fsl.TOPUP` interface, tweaking the interface's :meth:`__init__` method to make input specification easier.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Sets the *encoding_direction* and *readout_times* parameter values using the provided :class:`~django_mri.models.nifti.NIfTI` instances.\"\"\"\n dwi, phasediff = (kwargs.pop('dwi_file'), kwargs.pop('phasediff_file'))\n kwargs['encoding_direction'] = [self.fix_phase_encoding(dwi.get_phase_encoding_direction()), self.fix_phase_encoding(phasediff.get_phase_encoding_direction())]\n kwargs['readout_times'] = [dwi.get_total_readout_time(), phasediff.get_total_readout_time()]\n super().__init__(*args, **kwargs)\n\n def fix_phase_encoding(self, phase_encoding: str) -> str:\n \"\"\"Converts phase encoding values from *i, j, k* to *x, y, z*. Parameters ---------- phase_encoding : str Phase encoding Returns ------- str Coverted phase encoding\"\"\"\n for key, value in self.PHASE_ENCODING_DICT.items():\n phase_encoding = phase_encoding.replace(key, value)\n return phase_encoding\n", "source": "the_stack_v2_python_sparse", "source_path": "django_mri/analysis/interfaces/fsl/topup.py", "source_repo": "TheLabbingProject/django_mri", "split": "test", "star_events_count": 7} {"blob_id": "2aac94f99ef8c358b455b54d7fecea0063bc9394", "bodies": ["self._p1 = p1\nself._d1 = d1\nself._g = g\nself._upstream = Primitive(d1, p1, 0)\nself._csmax = sound_speed(p1, d1, g)\nd2 = disen(p2, d1, p1, g)\nv2 = visen(p2, d1, p1, g)\nself._csmin = sound_speed(p2, d2, g) + v2\nself._downstream = Primitive(d2, p2, v2)\nreturn", "if v > self._csmax:\n return Primitive(self._upstream.Density, self._upstream.Pressure, self._upstream.Velocity)\nelif v < self._csmin:\n return Primitive(self._downstream.Density, self._downstream.Pressure, self._downstream.Velocity)\nelse:\n u = (v - self._csmax) * 2 / (1 + self._g)\n c = self._csmax + (self._g - 1) * u / 2\n p = self._p1 * (c / self._csmax) ** (2 * self._g / (self._g - 1))\n d = disen(p, self._d1, self._p1, self._g)\n return Primitive(d, p, u)"], "bodies_text": "<|body_start_0|>\n self._p1 = p1\n self._d1 = d1\n self._g = g\n self._upstream = Primitive(d1, p1, 0)\n self._csmax = sound_speed(p1, d1, g)\n d2 = disen(p2, d1, p1, g)\n v2 = visen(p2, d1, p1, g)\n self._csmin = sound_speed(p2, d2, g) + v2\n self._downstream = Primitive(d2, p2, v2)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if v > self._csmax:\n return Primitive(self._upstream.Density, self._upstream.Pressure, self._upstream.Velocity)\n elif v < self._csmin:\n return Primitive(self._downstream.Density, self._downstream.Pressure, self._downstream.Velocity)\n else:\n u = (v - self._csmax) * 2 / (1 + self._g)\n c = self._csmax + (self._g - 1) * u / 2\n p = self._p1 * (c / self._csmax) ** (2 * self._g / (self._g - 1))\n d = disen(p, self._d1, self._p1, self._g)\n return Primitive(d, p, u)\n<|end_body_1|>\n", "class_docstring": "Spatial profile of a rarefaction wave", "class_name": "IsenProf", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IsenProf:\n \"\"\"Spatial profile of a rarefaction wave\"\"\"\n\n def __init__(self, p2, d1, p1, g):\n \"\"\"Class constructor\"\"\"\n <|body_0|>\n\n def CalcPrim(self, v):\n \"\"\"Calculates the primitive variables Input: v - Dimensionless coordinate (x/t)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._p1 = p1\n self._d1 = d1\n self._g = g\n self._upstream = Primitive(d1, p1, 0)\n self._csmax = sound_speed(p1, d1, g)\n d2 = disen(p2, d1, p1, g)\n v2 = visen(p2, d1, p1, g)\n self._csmin = sound_speed(p2, d2, g) + v2\n self._downstream = Primitive(d2, p2, v2)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if v > self._csmax:\n return Primitive(self._upstream.Density, self._upstream.Pressure, self._upstream.Velocity)\n elif v < self._csmin:\n return Primitive(self._downstream.Density, self._downstream.Pressure, self._downstream.Velocity)\n else:\n u = (v - self._csmax) * 2 / (1 + self._g)\n c = self._csmax + (self._g - 1) * u / 2\n p = self._p1 * (c / self._csmax) ** (2 * self._g / (self._g - 1))\n d = disen(p, self._d1, self._p1, self._g)\n return Primitive(d, p, u)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000075", "length_bytes": 11309, "license_type": "no_license", "methods": [{"docstring": "Class constructor", "name": "__init__", "signature": "def __init__(self, p2, d1, p1, g)"}, {"docstring": "Calculates the primitive variables Input: v - Dimensionless coordinate (x/t)", "name": "CalcPrim", "signature": "def CalcPrim(self, v)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005817", "prompt": "Implement the Python class `IsenProf` described below.\n\nClass description:\nSpatial profile of a rarefaction wave\n\nMethod signatures and docstrings:\n- def __init__(self, p2, d1, p1, g): Class constructor\n- def CalcPrim(self, v): Calculates the primitive variables Input: v - Dimensionless coordinate (x/t)", "prompted_full_text": "Implement the Python class `IsenProf` described below.\n\nClass description:\nSpatial profile of a rarefaction wave\n\nMethod signatures and docstrings:\n- def __init__(self, p2, d1, p1, g): Class constructor\n- def CalcPrim(self, v): Calculates the primitive variables Input: v - Dimensionless coordinate (x/t)\n\n<|skeleton|>\nclass IsenProf:\n \"\"\"Spatial profile of a rarefaction wave\"\"\"\n\n def __init__(self, p2, d1, p1, g):\n \"\"\"Class constructor\"\"\"\n <|body_0|>\n\n def CalcPrim(self, v):\n \"\"\"Calculates the primitive variables Input: v - Dimensionless coordinate (x/t)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._p1 = p1\n self._d1 = d1\n self._g = g\n self._upstream = Primitive(d1, p1, 0)\n self._csmax = sound_speed(p1, d1, g)\n d2 = disen(p2, d1, p1, g)\n v2 = visen(p2, d1, p1, g)\n self._csmin = sound_speed(p2, d2, g) + v2\n self._downstream = Primitive(d2, p2, v2)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if v > self._csmax:\n return Primitive(self._upstream.Density, self._upstream.Pressure, self._upstream.Velocity)\n elif v < self._csmin:\n return Primitive(self._downstream.Density, self._downstream.Pressure, self._downstream.Velocity)\n else:\n u = (v - self._csmax) * 2 / (1 + self._g)\n c = self._csmax + (self._g - 1) * u / 2\n p = self._p1 * (c / self._csmax) ** (2 * self._g / (self._g - 1))\n d = disen(p, self._d1, self._p1, self._g)\n return Primitive(d, p, u)\n<|end_body_1|>\n", "revision_id": "58ec1a7e71b00ddb387160b39196d8dd9e40bd3f", "skeleton": "<|skeleton|>\nclass IsenProf:\n \"\"\"Spatial profile of a rarefaction wave\"\"\"\n\n def __init__(self, p2, d1, p1, g):\n \"\"\"Class constructor\"\"\"\n <|body_0|>\n\n def CalcPrim(self, v):\n \"\"\"Calculates the primitive variables Input: v - Dimensionless coordinate (x/t)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IsenProf:\n \"\"\"Spatial profile of a rarefaction wave\"\"\"\n\n def __init__(self, p2, d1, p1, g):\n \"\"\"Class constructor\"\"\"\n self._p1 = p1\n self._d1 = d1\n self._g = g\n self._upstream = Primitive(d1, p1, 0)\n self._csmax = sound_speed(p1, d1, g)\n d2 = disen(p2, d1, p1, g)\n v2 = visen(p2, d1, p1, g)\n self._csmin = sound_speed(p2, d2, g) + v2\n self._downstream = Primitive(d2, p2, v2)\n return\n\n def CalcPrim(self, v):\n \"\"\"Calculates the primitive variables Input: v - Dimensionless coordinate (x/t)\"\"\"\n if v > self._csmax:\n return Primitive(self._upstream.Density, self._upstream.Pressure, self._upstream.Velocity)\n elif v < self._csmin:\n return Primitive(self._downstream.Density, self._downstream.Pressure, self._downstream.Velocity)\n else:\n u = (v - self._csmax) * 2 / (1 + self._g)\n c = self._csmax + (self._g - 1) * u / 2\n p = self._p1 * (c / self._csmax) ** (2 * self._g / (self._g - 1))\n d = disen(p, self._d1, self._p1, self._g)\n return Primitive(d, p, u)\n", "source": "the_stack_v2_python_sparse", "source_path": "analytic/enrs.py", "source_repo": "bolverk/huji-rich", "split": "test", "star_events_count": 16} {"blob_id": "93fd4d52773593a9dd25aa68a4c3e2c36261f1f6", "bodies": ["self.source = None\nself.data = None\nself.size = size\nself.choice_source = []\nself.weight = np.array([])", "self.source = data\nself.data = data\nself._handle_data_source()", "if isinstance(self.source, list):\n self.data = self.source\n self.weight = np.array([])\nelif isinstance(self.source, dict):\n self.data = list(self.source.keys())\n _weight = np.array(list(self.source.values()))\n self.weight = _weight / _weight.sum() if _weight.sum() != 0 else None", "try:\n total = int(total)\nexcept:\n total = 1\np = self.weight if self.weight.size > 0 else None\nif total <= 1:\n return choice(list(np.random.choice(self.data, size=self.size, replace=True, p=p)))\nelse:\n return list(np.random.choice(self.data, size=total, replace=True, p=p))", "if self.choice_source:\n return choice(self.data)\nelif self.weight.size > 0:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True, p=self.weight))\nelse:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True))\nreturn choice(self.choice_source)"], "bodies_text": "<|body_start_0|>\n self.source = None\n self.data = None\n self.size = size\n self.choice_source = []\n self.weight = np.array([])\n<|end_body_0|>\n\n<|body_start_1|>\n self.source = data\n self.data = data\n self._handle_data_source()\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.source, list):\n self.data = self.source\n self.weight = np.array([])\n elif isinstance(self.source, dict):\n self.data = list(self.source.keys())\n _weight = np.array(list(self.source.values()))\n self.weight = _weight / _weight.sum() if _weight.sum() != 0 else None\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n total = int(total)\n except:\n total = 1\n p = self.weight if self.weight.size > 0 else None\n if total <= 1:\n return choice(list(np.random.choice(self.data, size=self.size, replace=True, p=p)))\n else:\n return list(np.random.choice(self.data, size=total, replace=True, p=p))\n<|end_body_3|>\n\n<|body_start_4|>\n if self.choice_source:\n return choice(self.data)\n elif self.weight.size > 0:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True, p=self.weight))\n else:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True))\n return choice(self.choice_source)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "randomChoice", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass randomChoice:\n\n def __init__(self, size: int=5):\n \"\"\"随机选择类 外部参数: :param size:生成的数据量总数\"\"\"\n <|body_0|>\n\n def add(self, data: [List[Any], Dict]):\n \"\"\"添加数据集 :param data: 数据集 :return:\"\"\"\n <|body_1|>\n\n def _handle_data_source(self):\n \"\"\"处理数据源\"\"\"\n <|body_2|>\n\n def get(self, total: int=1) -> Any:\n \"\"\"随机选择指定数量的样本 :param total: 选择的数量 :return:\"\"\"\n <|body_3|>\n\n def choice(self) -> Any:\n \"\"\"获取一条随机选择的数据\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.source = None\n self.data = None\n self.size = size\n self.choice_source = []\n self.weight = np.array([])\n<|end_body_0|>\n\n<|body_start_1|>\n self.source = data\n self.data = data\n self._handle_data_source()\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.source, list):\n self.data = self.source\n self.weight = np.array([])\n elif isinstance(self.source, dict):\n self.data = list(self.source.keys())\n _weight = np.array(list(self.source.values()))\n self.weight = _weight / _weight.sum() if _weight.sum() != 0 else None\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n total = int(total)\n except:\n total = 1\n p = self.weight if self.weight.size > 0 else None\n if total <= 1:\n return choice(list(np.random.choice(self.data, size=self.size, replace=True, p=p)))\n else:\n return list(np.random.choice(self.data, size=total, replace=True, p=p))\n<|end_body_3|>\n\n<|body_start_4|>\n if self.choice_source:\n return choice(self.data)\n elif self.weight.size > 0:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True, p=self.weight))\n else:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True))\n return choice(self.choice_source)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000076", "length_bytes": 2458, "license_type": "no_license", "methods": [{"docstring": "随机选择类 外部参数: :param size:生成的数据量总数", "name": "__init__", "signature": "def __init__(self, size: int=5)"}, {"docstring": "添加数据集 :param data: 数据集 :return:", "name": "add", "signature": "def add(self, data: [List[Any], Dict])"}, {"docstring": "处理数据源", "name": "_handle_data_source", "signature": "def _handle_data_source(self)"}, {"docstring": "随机选择指定数量的样本 :param total: 选择的数量 :return:", "name": "get", "signature": "def get(self, total: int=1) -> Any"}, {"docstring": "获取一条随机选择的数据", "name": "choice", "signature": "def choice(self) -> Any"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_033386", "prompt": "Implement the Python class `randomChoice` described below.\n\nClass description:\nImplement the randomChoice class.\n\nMethod signatures and docstrings:\n- def __init__(self, size: int=5): 随机选择类 外部参数: :param size:生成的数据量总数\n- def add(self, data: [List[Any], Dict]): 添加数据集 :param data: 数据集 :return:\n- def _handle_data_source(self): 处理数据源\n- def get(self, total: int=1) -> Any: 随机选择指定数量的样本 :param total: 选择的数量 :return:\n- def choice(self) -> Any: 获取一条随机选择的数据", "prompted_full_text": "Implement the Python class `randomChoice` described below.\n\nClass description:\nImplement the randomChoice class.\n\nMethod signatures and docstrings:\n- def __init__(self, size: int=5): 随机选择类 外部参数: :param size:生成的数据量总数\n- def add(self, data: [List[Any], Dict]): 添加数据集 :param data: 数据集 :return:\n- def _handle_data_source(self): 处理数据源\n- def get(self, total: int=1) -> Any: 随机选择指定数量的样本 :param total: 选择的数量 :return:\n- def choice(self) -> Any: 获取一条随机选择的数据\n\n<|skeleton|>\nclass randomChoice:\n\n def __init__(self, size: int=5):\n \"\"\"随机选择类 外部参数: :param size:生成的数据量总数\"\"\"\n <|body_0|>\n\n def add(self, data: [List[Any], Dict]):\n \"\"\"添加数据集 :param data: 数据集 :return:\"\"\"\n <|body_1|>\n\n def _handle_data_source(self):\n \"\"\"处理数据源\"\"\"\n <|body_2|>\n\n def get(self, total: int=1) -> Any:\n \"\"\"随机选择指定数量的样本 :param total: 选择的数量 :return:\"\"\"\n <|body_3|>\n\n def choice(self) -> Any:\n \"\"\"获取一条随机选择的数据\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.source = None\n self.data = None\n self.size = size\n self.choice_source = []\n self.weight = np.array([])\n<|end_body_0|>\n\n<|body_start_1|>\n self.source = data\n self.data = data\n self._handle_data_source()\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.source, list):\n self.data = self.source\n self.weight = np.array([])\n elif isinstance(self.source, dict):\n self.data = list(self.source.keys())\n _weight = np.array(list(self.source.values()))\n self.weight = _weight / _weight.sum() if _weight.sum() != 0 else None\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n total = int(total)\n except:\n total = 1\n p = self.weight if self.weight.size > 0 else None\n if total <= 1:\n return choice(list(np.random.choice(self.data, size=self.size, replace=True, p=p)))\n else:\n return list(np.random.choice(self.data, size=total, replace=True, p=p))\n<|end_body_3|>\n\n<|body_start_4|>\n if self.choice_source:\n return choice(self.data)\n elif self.weight.size > 0:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True, p=self.weight))\n else:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True))\n return choice(self.choice_source)\n<|end_body_4|>\n", "revision_id": "a81c897e4481926daaafdbaf89d7087b793cd462", "skeleton": "<|skeleton|>\nclass randomChoice:\n\n def __init__(self, size: int=5):\n \"\"\"随机选择类 外部参数: :param size:生成的数据量总数\"\"\"\n <|body_0|>\n\n def add(self, data: [List[Any], Dict]):\n \"\"\"添加数据集 :param data: 数据集 :return:\"\"\"\n <|body_1|>\n\n def _handle_data_source(self):\n \"\"\"处理数据源\"\"\"\n <|body_2|>\n\n def get(self, total: int=1) -> Any:\n \"\"\"随机选择指定数量的样本 :param total: 选择的数量 :return:\"\"\"\n <|body_3|>\n\n def choice(self) -> Any:\n \"\"\"获取一条随机选择的数据\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class randomChoice:\n def __init__(self, size: int=5):\n \"\"\"随机选择类 外部参数: :param size:生成的数据量总数\"\"\"\n self.source = None\n self.data = None\n self.size = size\n self.choice_source = []\n self.weight = np.array([])\n\n def add(self, data: [List[Any], Dict]):\n \"\"\"添加数据集 :param data: 数据集 :return:\"\"\"\n self.source = data\n self.data = data\n self._handle_data_source()\n\n def _handle_data_source(self):\n \"\"\"处理数据源\"\"\"\n if isinstance(self.source, list):\n self.data = self.source\n self.weight = np.array([])\n elif isinstance(self.source, dict):\n self.data = list(self.source.keys())\n _weight = np.array(list(self.source.values()))\n self.weight = _weight / _weight.sum() if _weight.sum() != 0 else None\n\n def get(self, total: int=1) -> Any:\n \"\"\"随机选择指定数量的样本 :param total: 选择的数量 :return:\"\"\"\n try:\n total = int(total)\n except:\n total = 1\n p = self.weight if self.weight.size > 0 else None\n if total <= 1:\n return choice(list(np.random.choice(self.data, size=self.size, replace=True, p=p)))\n else:\n return list(np.random.choice(self.data, size=total, replace=True, p=p))\n\n def choice(self) -> Any:\n \"\"\"获取一条随机选择的数据\"\"\"\n if self.choice_source:\n return choice(self.data)\n elif self.weight.size > 0:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True, p=self.weight))\n else:\n self.choice_source = list(np.random.choice(self.data, size=self.size, replace=True))\n return choice(self.choice_source)\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/maybe/patterns/random_choice.py", "source_repo": "HalfLeaf/Maybe", "split": "test", "star_events_count": 1} {"blob_id": "025a94e3cccb0000d1612108a9f87dc72c8517dc", "bodies": ["self.bytes_transferred = bytes_transferred\nself.end_time_usecs = end_time_usecs\nself.failure_entities = failure_entities\nself.job_id = job_id\nself.job_run_id = job_run_id\nself.job_type = job_type\nself.sla_violated = sla_violated\nself.start_time_usecs = start_time_usecs\nself.status = status\nself.success_entities = success_entities\nself.total_entities = total_entities", "if dictionary is None:\n return None\nbytes_transferred = dictionary.get('bytesTransferred')\nend_time_usecs = dictionary.get('endTimeUsecs')\nfailure_entities = dictionary.get('failureEntities')\njob_id = dictionary.get('jobId')\njob_run_id = dictionary.get('jobRunId')\njob_type = dictionary.get('jobType')\nsla_violated = dictionary.get('slaViolated')\nstart_time_usecs = dictionary.get('startTimeUsecs')\nstatus = dictionary.get('status')\nsuccess_entities = dictionary.get('successEntities')\ntotal_entities = dictionary.get('totalEntities')\nreturn cls(bytes_transferred, end_time_usecs, failure_entities, job_id, job_run_id, job_type, sla_violated, start_time_usecs, status, success_entities, total_entities)"], "bodies_text": "<|body_start_0|>\n self.bytes_transferred = bytes_transferred\n self.end_time_usecs = end_time_usecs\n self.failure_entities = failure_entities\n self.job_id = job_id\n self.job_run_id = job_run_id\n self.job_type = job_type\n self.sla_violated = sla_violated\n self.start_time_usecs = start_time_usecs\n self.status = status\n self.success_entities = success_entities\n self.total_entities = total_entities\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n bytes_transferred = dictionary.get('bytesTransferred')\n end_time_usecs = dictionary.get('endTimeUsecs')\n failure_entities = dictionary.get('failureEntities')\n job_id = dictionary.get('jobId')\n job_run_id = dictionary.get('jobRunId')\n job_type = dictionary.get('jobType')\n sla_violated = dictionary.get('slaViolated')\n start_time_usecs = dictionary.get('startTimeUsecs')\n status = dictionary.get('status')\n success_entities = dictionary.get('successEntities')\n total_entities = dictionary.get('totalEntities')\n return cls(bytes_transferred, end_time_usecs, failure_entities, job_id, job_run_id, job_type, sla_violated, start_time_usecs, status, success_entities, total_entities)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'GetJobRunInfoResult' model. TODO: type description here. Attributes: bytes_transferred (long|int): Specifies bytes transferred in the run. end_time_usecs (long|int): Specifies the end time of the run. failure_entities (long|int): Specifies the number of failed objects in the run. job_id (string): Specifies the job id. job_run_id (string): Specifies the job run id. job_type (string): Specifies the job type, protection, replication, archival, apollo, indexing etc. sla_violated (bool): Specifies if the sla was violated the run. start_time_usecs (long|int): Specifies the start time of the run. status (long|int): Specifies status of the run success_entities (long|int): Spec", "class_name": "GetJobRunInfoResult", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetJobRunInfoResult:\n \"\"\"Implementation of the 'GetJobRunInfoResult' model. TODO: type description here. Attributes: bytes_transferred (long|int): Specifies bytes transferred in the run. end_time_usecs (long|int): Specifies the end time of the run. failure_entities (long|int): Specifies the number of failed objects in the run. job_id (string): Specifies the job id. job_run_id (string): Specifies the job run id. job_type (string): Specifies the job type, protection, replication, archival, apollo, indexing etc. sla_violated (bool): Specifies if the sla was violated the run. start_time_usecs (long|int): Specifies the start time of the run. status (long|int): Specifies status of the run success_entities (long|int): Spec\"\"\"\n\n def __init__(self, bytes_transferred=None, end_time_usecs=None, failure_entities=None, job_id=None, job_run_id=None, job_type=None, sla_violated=None, start_time_usecs=None, status=None, success_entities=None, total_entities=None):\n \"\"\"Constructor for the GetJobRunInfoResult class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.bytes_transferred = bytes_transferred\n self.end_time_usecs = end_time_usecs\n self.failure_entities = failure_entities\n self.job_id = job_id\n self.job_run_id = job_run_id\n self.job_type = job_type\n self.sla_violated = sla_violated\n self.start_time_usecs = start_time_usecs\n self.status = status\n self.success_entities = success_entities\n self.total_entities = total_entities\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n bytes_transferred = dictionary.get('bytesTransferred')\n end_time_usecs = dictionary.get('endTimeUsecs')\n failure_entities = dictionary.get('failureEntities')\n job_id = dictionary.get('jobId')\n job_run_id = dictionary.get('jobRunId')\n job_type = dictionary.get('jobType')\n sla_violated = dictionary.get('slaViolated')\n start_time_usecs = dictionary.get('startTimeUsecs')\n status = dictionary.get('status')\n success_entities = dictionary.get('successEntities')\n total_entities = dictionary.get('totalEntities')\n return cls(bytes_transferred, end_time_usecs, failure_entities, job_id, job_run_id, job_type, sla_violated, start_time_usecs, status, success_entities, total_entities)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000077", "length_bytes": 4093, "license_type": "permissive", "methods": [{"docstring": "Constructor for the GetJobRunInfoResult class", "name": "__init__", "signature": "def __init__(self, bytes_transferred=None, end_time_usecs=None, failure_entities=None, job_id=None, job_run_id=None, job_type=None, sla_violated=None, start_time_usecs=None, status=None, success_entities=None, total_entities=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024914", "prompt": "Implement the Python class `GetJobRunInfoResult` described below.\n\nClass description:\nImplementation of the 'GetJobRunInfoResult' model. TODO: type description here. Attributes: bytes_transferred (long|int): Specifies bytes transferred in the run. end_time_usecs (long|int): Specifies the end time of the run. failure_entities (long|int): Specifies the number of failed objects in the run. job_id (string): Specifies the job id. job_run_id (string): Specifies the job run id. job_type (string): Specifies the job type, protection, replication, archival, apollo, indexing etc. sla_violated (bool): Specifies if the sla was violated the run. start_time_usecs (long|int): Specifies the start time of the run. status (long|int): Specifies status of the run success_entities (long|int): Spec\n\nMethod signatures and docstrings:\n- def __init__(self, bytes_transferred=None, end_time_usecs=None, failure_entities=None, job_id=None, job_run_id=None, job_type=None, sla_violated=None, start_time_usecs=None, status=None, success_entities=None, total_entities=None): Constructor for the GetJobRunInfoResult class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `GetJobRunInfoResult` described below.\n\nClass description:\nImplementation of the 'GetJobRunInfoResult' model. TODO: type description here. Attributes: bytes_transferred (long|int): Specifies bytes transferred in the run. end_time_usecs (long|int): Specifies the end time of the run. failure_entities (long|int): Specifies the number of failed objects in the run. job_id (string): Specifies the job id. job_run_id (string): Specifies the job run id. job_type (string): Specifies the job type, protection, replication, archival, apollo, indexing etc. sla_violated (bool): Specifies if the sla was violated the run. start_time_usecs (long|int): Specifies the start time of the run. status (long|int): Specifies status of the run success_entities (long|int): Spec\n\nMethod signatures and docstrings:\n- def __init__(self, bytes_transferred=None, end_time_usecs=None, failure_entities=None, job_id=None, job_run_id=None, job_type=None, sla_violated=None, start_time_usecs=None, status=None, success_entities=None, total_entities=None): Constructor for the GetJobRunInfoResult class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass GetJobRunInfoResult:\n \"\"\"Implementation of the 'GetJobRunInfoResult' model. TODO: type description here. Attributes: bytes_transferred (long|int): Specifies bytes transferred in the run. end_time_usecs (long|int): Specifies the end time of the run. failure_entities (long|int): Specifies the number of failed objects in the run. job_id (string): Specifies the job id. job_run_id (string): Specifies the job run id. job_type (string): Specifies the job type, protection, replication, archival, apollo, indexing etc. sla_violated (bool): Specifies if the sla was violated the run. start_time_usecs (long|int): Specifies the start time of the run. status (long|int): Specifies status of the run success_entities (long|int): Spec\"\"\"\n\n def __init__(self, bytes_transferred=None, end_time_usecs=None, failure_entities=None, job_id=None, job_run_id=None, job_type=None, sla_violated=None, start_time_usecs=None, status=None, success_entities=None, total_entities=None):\n \"\"\"Constructor for the GetJobRunInfoResult class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.bytes_transferred = bytes_transferred\n self.end_time_usecs = end_time_usecs\n self.failure_entities = failure_entities\n self.job_id = job_id\n self.job_run_id = job_run_id\n self.job_type = job_type\n self.sla_violated = sla_violated\n self.start_time_usecs = start_time_usecs\n self.status = status\n self.success_entities = success_entities\n self.total_entities = total_entities\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n bytes_transferred = dictionary.get('bytesTransferred')\n end_time_usecs = dictionary.get('endTimeUsecs')\n failure_entities = dictionary.get('failureEntities')\n job_id = dictionary.get('jobId')\n job_run_id = dictionary.get('jobRunId')\n job_type = dictionary.get('jobType')\n sla_violated = dictionary.get('slaViolated')\n start_time_usecs = dictionary.get('startTimeUsecs')\n status = dictionary.get('status')\n success_entities = dictionary.get('successEntities')\n total_entities = dictionary.get('totalEntities')\n return cls(bytes_transferred, end_time_usecs, failure_entities, job_id, job_run_id, job_type, sla_violated, start_time_usecs, status, success_entities, total_entities)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass GetJobRunInfoResult:\n \"\"\"Implementation of the 'GetJobRunInfoResult' model. TODO: type description here. Attributes: bytes_transferred (long|int): Specifies bytes transferred in the run. end_time_usecs (long|int): Specifies the end time of the run. failure_entities (long|int): Specifies the number of failed objects in the run. job_id (string): Specifies the job id. job_run_id (string): Specifies the job run id. job_type (string): Specifies the job type, protection, replication, archival, apollo, indexing etc. sla_violated (bool): Specifies if the sla was violated the run. start_time_usecs (long|int): Specifies the start time of the run. status (long|int): Specifies status of the run success_entities (long|int): Spec\"\"\"\n\n def __init__(self, bytes_transferred=None, end_time_usecs=None, failure_entities=None, job_id=None, job_run_id=None, job_type=None, sla_violated=None, start_time_usecs=None, status=None, success_entities=None, total_entities=None):\n \"\"\"Constructor for the GetJobRunInfoResult class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GetJobRunInfoResult:\n \"\"\"Implementation of the 'GetJobRunInfoResult' model. TODO: type description here. Attributes: bytes_transferred (long|int): Specifies bytes transferred in the run. end_time_usecs (long|int): Specifies the end time of the run. failure_entities (long|int): Specifies the number of failed objects in the run. job_id (string): Specifies the job id. job_run_id (string): Specifies the job run id. job_type (string): Specifies the job type, protection, replication, archival, apollo, indexing etc. sla_violated (bool): Specifies if the sla was violated the run. start_time_usecs (long|int): Specifies the start time of the run. status (long|int): Specifies status of the run success_entities (long|int): Spec\"\"\"\n\n def __init__(self, bytes_transferred=None, end_time_usecs=None, failure_entities=None, job_id=None, job_run_id=None, job_type=None, sla_violated=None, start_time_usecs=None, status=None, success_entities=None, total_entities=None):\n \"\"\"Constructor for the GetJobRunInfoResult class\"\"\"\n self.bytes_transferred = bytes_transferred\n self.end_time_usecs = end_time_usecs\n self.failure_entities = failure_entities\n self.job_id = job_id\n self.job_run_id = job_run_id\n self.job_type = job_type\n self.sla_violated = sla_violated\n self.start_time_usecs = start_time_usecs\n self.status = status\n self.success_entities = success_entities\n self.total_entities = total_entities\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n bytes_transferred = dictionary.get('bytesTransferred')\n end_time_usecs = dictionary.get('endTimeUsecs')\n failure_entities = dictionary.get('failureEntities')\n job_id = dictionary.get('jobId')\n job_run_id = dictionary.get('jobRunId')\n job_type = dictionary.get('jobType')\n sla_violated = dictionary.get('slaViolated')\n start_time_usecs = dictionary.get('startTimeUsecs')\n status = dictionary.get('status')\n success_entities = dictionary.get('successEntities')\n total_entities = dictionary.get('totalEntities')\n return cls(bytes_transferred, end_time_usecs, failure_entities, job_id, job_run_id, job_type, sla_violated, start_time_usecs, status, success_entities, total_entities)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/get_job_run_info_result.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "1d7756b94041f88c1e02995bb1fde5dd0b1dd23f", "bodies": ["ret = []\nqueue = collections.deque([root])\nwhile queue:\n node = queue.popleft()\n if not node:\n ret.append('null')\n else:\n ret.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\nwhile ret and ret[-1] == 'null':\n ret.pop()\nreturn ','.join(ret)", "if data == '':\n return None\ndata = data.split(',')\nroot = TreeNode(data[0])\nque = collections.deque([root])\ni = 1\nwhile que and i < len(data):\n node = que.popleft()\n if data[i] != 'null':\n node.left = TreeNode(data[i])\n que.append(node.left)\n i += 1\n if i < len(data) and data[i] != 'null':\n node.right = TreeNode(data[i])\n que.append(node.right)\n i += 1\nreturn root"], "bodies_text": "<|body_start_0|>\n ret = []\n queue = collections.deque([root])\n while queue:\n node = queue.popleft()\n if not node:\n ret.append('null')\n else:\n ret.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n while ret and ret[-1] == 'null':\n ret.pop()\n return ','.join(ret)\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n data = data.split(',')\n root = TreeNode(data[0])\n que = collections.deque([root])\n i = 1\n while que and i < len(data):\n node = que.popleft()\n if data[i] != 'null':\n node.left = TreeNode(data[i])\n que.append(node.left)\n i += 1\n if i < len(data) and data[i] != 'null':\n node.right = TreeNode(data[i])\n que.append(node.right)\n i += 1\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ret = []\n queue = collections.deque([root])\n while queue:\n node = queue.popleft()\n if not node:\n ret.append('null')\n else:\n ret.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n while ret and ret[-1] == 'null':\n ret.pop()\n return ','.join(ret)\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n data = data.split(',')\n root = TreeNode(data[0])\n que = collections.deque([root])\n i = 1\n while que and i < len(data):\n node = que.popleft()\n if data[i] != 'null':\n node.left = TreeNode(data[i])\n que.append(node.left)\n i += 1\n if i < len(data) and data[i] != 'null':\n node.right = TreeNode(data[i])\n que.append(node.right)\n i += 1\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000078", "length_bytes": 1638, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_050488", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ret = []\n queue = collections.deque([root])\n while queue:\n node = queue.popleft()\n if not node:\n ret.append('null')\n else:\n ret.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n while ret and ret[-1] == 'null':\n ret.pop()\n return ','.join(ret)\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n data = data.split(',')\n root = TreeNode(data[0])\n que = collections.deque([root])\n i = 1\n while que and i < len(data):\n node = que.popleft()\n if data[i] != 'null':\n node.left = TreeNode(data[i])\n que.append(node.left)\n i += 1\n if i < len(data) and data[i] != 'null':\n node.right = TreeNode(data[i])\n que.append(node.right)\n i += 1\n return root\n<|end_body_1|>\n", "revision_id": "54d0b3c237e0ffed8782915d6b75b7c6a0fe0de7", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n ret = []\n queue = collections.deque([root])\n while queue:\n node = queue.popleft()\n if not node:\n ret.append('null')\n else:\n ret.append(str(node.val))\n queue.append(node.left)\n queue.append(node.right)\n while ret and ret[-1] == 'null':\n ret.pop()\n return ','.join(ret)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n if data == '':\n return None\n data = data.split(',')\n root = TreeNode(data[0])\n que = collections.deque([root])\n i = 1\n while que and i < len(data):\n node = que.popleft()\n if data[i] != 'null':\n node.left = TreeNode(data[i])\n que.append(node.left)\n i += 1\n if i < len(data) and data[i] != 'null':\n node.right = TreeNode(data[i])\n que.append(node.right)\n i += 1\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "0297_Serialize_and_Deserialize_Binary_Tree/try_2.py", "source_repo": "novayo/LeetCode", "split": "test", "star_events_count": 8} {"blob_id": "243c36b0cd2c6dd52cd1c66eb07a159d6d03f122", "bodies": ["ObjectManager.__init__(self)\nself.getters.update({'name': 'get_general', 'organizations': 'get_many_to_many', 'users': 'get_many_to_many', 'user_org_roles': 'get_many_to_one'})\nself.setters.update({'name': 'set_general', 'organizations': 'set_many', 'users': 'set_many'})\nself.my_django_model = facade.models.OrgRole", "o = self.my_django_model.objects.create(name=name)\nself.authorizer.check_create_permissions(auth_token, o)\nreturn o"], "bodies_text": "<|body_start_0|>\n ObjectManager.__init__(self)\n self.getters.update({'name': 'get_general', 'organizations': 'get_many_to_many', 'users': 'get_many_to_many', 'user_org_roles': 'get_many_to_one'})\n self.setters.update({'name': 'set_general', 'organizations': 'set_many', 'users': 'set_many'})\n self.my_django_model = facade.models.OrgRole\n<|end_body_0|>\n\n<|body_start_1|>\n o = self.my_django_model.objects.create(name=name)\n self.authorizer.check_create_permissions(auth_token, o)\n return o\n<|end_body_1|>\n", "class_docstring": "Manage roles that users can have in organizations in the Power Reg system", "class_name": "OrgRoleManager", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OrgRoleManager:\n \"\"\"Manage roles that users can have in organizations in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, name):\n \"\"\"Create a new OrgRole @param name name of the OrgRole @return a reference to the newly created OrgRole\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ObjectManager.__init__(self)\n self.getters.update({'name': 'get_general', 'organizations': 'get_many_to_many', 'users': 'get_many_to_many', 'user_org_roles': 'get_many_to_one'})\n self.setters.update({'name': 'set_general', 'organizations': 'set_many', 'users': 'set_many'})\n self.my_django_model = facade.models.OrgRole\n<|end_body_0|>\n\n<|body_start_1|>\n o = self.my_django_model.objects.create(name=name)\n self.authorizer.check_create_permissions(auth_token, o)\n return o\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000079", "length_bytes": 1345, "license_type": "permissive", "methods": [{"docstring": "constructor", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Create a new OrgRole @param name name of the OrgRole @return a reference to the newly created OrgRole", "name": "create", "signature": "def create(self, auth_token, name)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_015157", "prompt": "Implement the Python class `OrgRoleManager` described below.\n\nClass description:\nManage roles that users can have in organizations in the Power Reg system\n\nMethod signatures and docstrings:\n- def __init__(self): constructor\n- def create(self, auth_token, name): Create a new OrgRole @param name name of the OrgRole @return a reference to the newly created OrgRole", "prompted_full_text": "Implement the Python class `OrgRoleManager` described below.\n\nClass description:\nManage roles that users can have in organizations in the Power Reg system\n\nMethod signatures and docstrings:\n- def __init__(self): constructor\n- def create(self, auth_token, name): Create a new OrgRole @param name name of the OrgRole @return a reference to the newly created OrgRole\n\n<|skeleton|>\nclass OrgRoleManager:\n \"\"\"Manage roles that users can have in organizations in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, name):\n \"\"\"Create a new OrgRole @param name name of the OrgRole @return a reference to the newly created OrgRole\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ObjectManager.__init__(self)\n self.getters.update({'name': 'get_general', 'organizations': 'get_many_to_many', 'users': 'get_many_to_many', 'user_org_roles': 'get_many_to_one'})\n self.setters.update({'name': 'set_general', 'organizations': 'set_many', 'users': 'set_many'})\n self.my_django_model = facade.models.OrgRole\n<|end_body_0|>\n\n<|body_start_1|>\n o = self.my_django_model.objects.create(name=name)\n self.authorizer.check_create_permissions(auth_token, o)\n return o\n<|end_body_1|>\n", "revision_id": "a59457bc37f0501aea1f54d006a6de94ff80511c", "skeleton": "<|skeleton|>\nclass OrgRoleManager:\n \"\"\"Manage roles that users can have in organizations in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, name):\n \"\"\"Create a new OrgRole @param name name of the OrgRole @return a reference to the newly created OrgRole\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OrgRoleManager:\n \"\"\"Manage roles that users can have in organizations in the Power Reg system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n ObjectManager.__init__(self)\n self.getters.update({'name': 'get_general', 'organizations': 'get_many_to_many', 'users': 'get_many_to_many', 'user_org_roles': 'get_many_to_one'})\n self.setters.update({'name': 'set_general', 'organizations': 'set_many', 'users': 'set_many'})\n self.my_django_model = facade.models.OrgRole\n\n def create(self, auth_token, name):\n \"\"\"Create a new OrgRole @param name name of the OrgRole @return a reference to the newly created OrgRole\"\"\"\n o = self.my_django_model.objects.create(name=name)\n self.authorizer.check_create_permissions(auth_token, o)\n return o\n", "source": "the_stack_v2_python_sparse", "source_path": "pr_services/user_system/organization_role_manager.py", "source_repo": "ninemoreminutes/openassign-server", "split": "test", "star_events_count": 0} {"blob_id": "84e88f932896ef1b4245344f2a43f27d74ee2127", "bodies": ["LinuxInit.enable(self)\ninit_path = '/etc/systemd/system/{0}.service'.format(self.name)\nwith open(os.path.join(self.template_dir, 'monasca-agent.service.template'), 'r') as template:\n with open(init_path, 'w') as service_script:\n service_script.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username, config_dir=self.config_dir))\nos.chown(init_path, 0, 0)\nos.chmod(init_path, 420)\nsubprocess.check_call(['systemctl', 'daemon-reload'])\nsubprocess.check_call(['systemctl', 'enable', '{0}.service'.format(self.name)])\nlog.info('Enabled {0} service via systemd'.format(self.name))", "LinuxInit.start(self)\nlog.info('Starting {0} service via systemd'.format(self.name))\nif restart:\n subprocess.check_call(['systemctl', 'restart', '{0}.service'.format(self.name)])\nelse:\n subprocess.check_call(['systemctl', 'start', '{0}.service'.format(self.name)])\nreturn True", "LinuxInit.stop(self)\nlog.info('Stopping {0} service'.format(self.name))\nsubprocess.check_call(['systemctl', 'stop', '{0}.service'.format(self.name)])\nreturn True", "try:\n subprocess.check_output(['systemctl', 'is-enabled', '{0}.service'.format(self.name)])\nexcept subprocess.CalledProcessError:\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n LinuxInit.enable(self)\n init_path = '/etc/systemd/system/{0}.service'.format(self.name)\n with open(os.path.join(self.template_dir, 'monasca-agent.service.template'), 'r') as template:\n with open(init_path, 'w') as service_script:\n service_script.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username, config_dir=self.config_dir))\n os.chown(init_path, 0, 0)\n os.chmod(init_path, 420)\n subprocess.check_call(['systemctl', 'daemon-reload'])\n subprocess.check_call(['systemctl', 'enable', '{0}.service'.format(self.name)])\n log.info('Enabled {0} service via systemd'.format(self.name))\n<|end_body_0|>\n\n<|body_start_1|>\n LinuxInit.start(self)\n log.info('Starting {0} service via systemd'.format(self.name))\n if restart:\n subprocess.check_call(['systemctl', 'restart', '{0}.service'.format(self.name)])\n else:\n subprocess.check_call(['systemctl', 'start', '{0}.service'.format(self.name)])\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n LinuxInit.stop(self)\n log.info('Stopping {0} service'.format(self.name))\n subprocess.check_call(['systemctl', 'stop', '{0}.service'.format(self.name)])\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n subprocess.check_output(['systemctl', 'is-enabled', '{0}.service'.format(self.name)])\n except subprocess.CalledProcessError:\n return False\n return True\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Systemd", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Systemd:\n\n def enable(self):\n \"\"\"Sets monasca-agent to start on boot. Generally this requires running as super user\"\"\"\n <|body_0|>\n\n def start(self, restart=True):\n \"\"\"Starts monasca-agent. If the agent is running and restart is True, restart\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stops monasca-agent.\"\"\"\n <|body_2|>\n\n def is_enabled(self):\n \"\"\"Returns True if monasca-agent is setup to start on boot, false otherwise.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n LinuxInit.enable(self)\n init_path = '/etc/systemd/system/{0}.service'.format(self.name)\n with open(os.path.join(self.template_dir, 'monasca-agent.service.template'), 'r') as template:\n with open(init_path, 'w') as service_script:\n service_script.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username, config_dir=self.config_dir))\n os.chown(init_path, 0, 0)\n os.chmod(init_path, 420)\n subprocess.check_call(['systemctl', 'daemon-reload'])\n subprocess.check_call(['systemctl', 'enable', '{0}.service'.format(self.name)])\n log.info('Enabled {0} service via systemd'.format(self.name))\n<|end_body_0|>\n\n<|body_start_1|>\n LinuxInit.start(self)\n log.info('Starting {0} service via systemd'.format(self.name))\n if restart:\n subprocess.check_call(['systemctl', 'restart', '{0}.service'.format(self.name)])\n else:\n subprocess.check_call(['systemctl', 'start', '{0}.service'.format(self.name)])\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n LinuxInit.stop(self)\n log.info('Stopping {0} service'.format(self.name))\n subprocess.check_call(['systemctl', 'stop', '{0}.service'.format(self.name)])\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n subprocess.check_output(['systemctl', 'is-enabled', '{0}.service'.format(self.name)])\n except subprocess.CalledProcessError:\n return False\n return True\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000080", "length_bytes": 5913, "license_type": "permissive", "methods": [{"docstring": "Sets monasca-agent to start on boot. Generally this requires running as super user", "name": "enable", "signature": "def enable(self)"}, {"docstring": "Starts monasca-agent. If the agent is running and restart is True, restart", "name": "start", "signature": "def start(self, restart=True)"}, {"docstring": "Stops monasca-agent.", "name": "stop", "signature": "def stop(self)"}, {"docstring": "Returns True if monasca-agent is setup to start on boot, false otherwise.", "name": "is_enabled", "signature": "def is_enabled(self)"}], "n_methods": 4, "prompt": "Implement the Python class `Systemd` described below.\n\nClass description:\nImplement the Systemd class.\n\nMethod signatures and docstrings:\n- def enable(self): Sets monasca-agent to start on boot. Generally this requires running as super user\n- def start(self, restart=True): Starts monasca-agent. If the agent is running and restart is True, restart\n- def stop(self): Stops monasca-agent.\n- def is_enabled(self): Returns True if monasca-agent is setup to start on boot, false otherwise.", "prompted_full_text": "Implement the Python class `Systemd` described below.\n\nClass description:\nImplement the Systemd class.\n\nMethod signatures and docstrings:\n- def enable(self): Sets monasca-agent to start on boot. Generally this requires running as super user\n- def start(self, restart=True): Starts monasca-agent. If the agent is running and restart is True, restart\n- def stop(self): Stops monasca-agent.\n- def is_enabled(self): Returns True if monasca-agent is setup to start on boot, false otherwise.\n\n<|skeleton|>\nclass Systemd:\n\n def enable(self):\n \"\"\"Sets monasca-agent to start on boot. Generally this requires running as super user\"\"\"\n <|body_0|>\n\n def start(self, restart=True):\n \"\"\"Starts monasca-agent. If the agent is running and restart is True, restart\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stops monasca-agent.\"\"\"\n <|body_2|>\n\n def is_enabled(self):\n \"\"\"Returns True if monasca-agent is setup to start on boot, false otherwise.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n LinuxInit.enable(self)\n init_path = '/etc/systemd/system/{0}.service'.format(self.name)\n with open(os.path.join(self.template_dir, 'monasca-agent.service.template'), 'r') as template:\n with open(init_path, 'w') as service_script:\n service_script.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username, config_dir=self.config_dir))\n os.chown(init_path, 0, 0)\n os.chmod(init_path, 420)\n subprocess.check_call(['systemctl', 'daemon-reload'])\n subprocess.check_call(['systemctl', 'enable', '{0}.service'.format(self.name)])\n log.info('Enabled {0} service via systemd'.format(self.name))\n<|end_body_0|>\n\n<|body_start_1|>\n LinuxInit.start(self)\n log.info('Starting {0} service via systemd'.format(self.name))\n if restart:\n subprocess.check_call(['systemctl', 'restart', '{0}.service'.format(self.name)])\n else:\n subprocess.check_call(['systemctl', 'start', '{0}.service'.format(self.name)])\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n LinuxInit.stop(self)\n log.info('Stopping {0} service'.format(self.name))\n subprocess.check_call(['systemctl', 'stop', '{0}.service'.format(self.name)])\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n subprocess.check_output(['systemctl', 'is-enabled', '{0}.service'.format(self.name)])\n except subprocess.CalledProcessError:\n return False\n return True\n<|end_body_3|>\n", "revision_id": "7c381f2ab5320ccbdabadc132f8deddc88691479", "skeleton": "<|skeleton|>\nclass Systemd:\n\n def enable(self):\n \"\"\"Sets monasca-agent to start on boot. Generally this requires running as super user\"\"\"\n <|body_0|>\n\n def start(self, restart=True):\n \"\"\"Starts monasca-agent. If the agent is running and restart is True, restart\"\"\"\n <|body_1|>\n\n def stop(self):\n \"\"\"Stops monasca-agent.\"\"\"\n <|body_2|>\n\n def is_enabled(self):\n \"\"\"Returns True if monasca-agent is setup to start on boot, false otherwise.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Systemd:\n def enable(self):\n \"\"\"Sets monasca-agent to start on boot. Generally this requires running as super user\"\"\"\n LinuxInit.enable(self)\n init_path = '/etc/systemd/system/{0}.service'.format(self.name)\n with open(os.path.join(self.template_dir, 'monasca-agent.service.template'), 'r') as template:\n with open(init_path, 'w') as service_script:\n service_script.write(template.read().format(prefix=self.prefix_dir, monasca_user=self.username, config_dir=self.config_dir))\n os.chown(init_path, 0, 0)\n os.chmod(init_path, 420)\n subprocess.check_call(['systemctl', 'daemon-reload'])\n subprocess.check_call(['systemctl', 'enable', '{0}.service'.format(self.name)])\n log.info('Enabled {0} service via systemd'.format(self.name))\n\n def start(self, restart=True):\n \"\"\"Starts monasca-agent. If the agent is running and restart is True, restart\"\"\"\n LinuxInit.start(self)\n log.info('Starting {0} service via systemd'.format(self.name))\n if restart:\n subprocess.check_call(['systemctl', 'restart', '{0}.service'.format(self.name)])\n else:\n subprocess.check_call(['systemctl', 'start', '{0}.service'.format(self.name)])\n return True\n\n def stop(self):\n \"\"\"Stops monasca-agent.\"\"\"\n LinuxInit.stop(self)\n log.info('Stopping {0} service'.format(self.name))\n subprocess.check_call(['systemctl', 'stop', '{0}.service'.format(self.name)])\n return True\n\n def is_enabled(self):\n \"\"\"Returns True if monasca-agent is setup to start on boot, false otherwise.\"\"\"\n try:\n subprocess.check_output(['systemctl', 'is-enabled', '{0}.service'.format(self.name)])\n except subprocess.CalledProcessError:\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "monasca_setup/service/linux.py", "source_repo": "telefonicaid/monasca-agent", "split": "test", "star_events_count": 0} {"blob_id": "0f761daf0f2f8a5e82eda6ed3436e3996294a8b0", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "class_docstring": "定义主机信息识别服务", "class_name": "HostServicer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HostServicer:\n \"\"\"定义主机信息识别服务\"\"\"\n\n def Location(self, request, context):\n \"\"\"定义IP信息查询方法\"\"\"\n <|body_0|>\n\n def Alive(self, request, context):\n \"\"\"定义主机存活方法\"\"\"\n <|body_1|>\n\n def Detail(self, request, context):\n \"\"\"定义服务识别方法\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000081", "length_bytes": 15079, "license_type": "no_license", "methods": [{"docstring": "定义IP信息查询方法", "name": "Location", "signature": "def Location(self, request, context)"}, {"docstring": "定义主机存活方法", "name": "Alive", "signature": "def Alive(self, request, context)"}, {"docstring": "定义服务识别方法", "name": "Detail", "signature": "def Detail(self, request, context)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_002435", "prompt": "Implement the Python class `HostServicer` described below.\n\nClass description:\n定义主机信息识别服务\n\nMethod signatures and docstrings:\n- def Location(self, request, context): 定义IP信息查询方法\n- def Alive(self, request, context): 定义主机存活方法\n- def Detail(self, request, context): 定义服务识别方法", "prompted_full_text": "Implement the Python class `HostServicer` described below.\n\nClass description:\n定义主机信息识别服务\n\nMethod signatures and docstrings:\n- def Location(self, request, context): 定义IP信息查询方法\n- def Alive(self, request, context): 定义主机存活方法\n- def Detail(self, request, context): 定义服务识别方法\n\n<|skeleton|>\nclass HostServicer:\n \"\"\"定义主机信息识别服务\"\"\"\n\n def Location(self, request, context):\n \"\"\"定义IP信息查询方法\"\"\"\n <|body_0|>\n\n def Alive(self, request, context):\n \"\"\"定义主机存活方法\"\"\"\n <|body_1|>\n\n def Detail(self, request, context):\n \"\"\"定义服务识别方法\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n", "revision_id": "c2389b6d9c47d9a1a2e63c8e0dc3fc132943b444", "skeleton": "<|skeleton|>\nclass HostServicer:\n \"\"\"定义主机信息识别服务\"\"\"\n\n def Location(self, request, context):\n \"\"\"定义IP信息查询方法\"\"\"\n <|body_0|>\n\n def Alive(self, request, context):\n \"\"\"定义主机存活方法\"\"\"\n <|body_1|>\n\n def Detail(self, request, context):\n \"\"\"定义服务识别方法\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HostServicer:\n \"\"\"定义主机信息识别服务\"\"\"\n\n def Location(self, request, context):\n \"\"\"定义IP信息查询方法\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Alive(self, request, context):\n \"\"\"定义主机存活方法\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Detail(self, request, context):\n \"\"\"定义服务识别方法\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "micro/proto/grpc/aquaman_pb2_grpc.py", "source_repo": "jstang9527/buoy", "split": "test", "star_events_count": 2} {"blob_id": "bb7161bea03f7daa13763aa125c4151d03b498b1", "bodies": ["self.start_name = start_name\nself.start_address = start_address\nself.start_location = start_location\nself.route_add_instructions = route_add_instructions\nsuper(ExternallySourcedRouteStartStopDetails, self).__init__(stop_name, stop_location, stop_address, stop_deadline)", "if dictionary is None:\n return None\nstart_name = dictionary.get('startName')\nstart_location = dictionary.get('startLocation')\nstop_name = dictionary.get('stopName')\nstop_location = dictionary.get('stopLocation')\nstart_address = dictionary.get('startAddress')\nroute_add_instructions = dictionary.get('routeAddInstructions')\nstop_address = dictionary.get('stopAddress')\nstop_deadline = dictionary.get('stopDeadline')\nreturn cls(start_name, start_location, stop_name, stop_location, start_address, route_add_instructions, stop_address, stop_deadline)"], "bodies_text": "<|body_start_0|>\n self.start_name = start_name\n self.start_address = start_address\n self.start_location = start_location\n self.route_add_instructions = route_add_instructions\n super(ExternallySourcedRouteStartStopDetails, self).__init__(stop_name, stop_location, stop_address, stop_deadline)\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n start_name = dictionary.get('startName')\n start_location = dictionary.get('startLocation')\n stop_name = dictionary.get('stopName')\n stop_location = dictionary.get('stopLocation')\n start_address = dictionary.get('startAddress')\n route_add_instructions = dictionary.get('routeAddInstructions')\n stop_address = dictionary.get('stopAddress')\n stop_deadline = dictionary.get('stopDeadline')\n return cls(start_name, start_location, stop_name, stop_location, start_address, route_add_instructions, stop_address, stop_deadline)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'Externally Sourced Route Start Stop Details' model. TODO: type model description here. NOTE: This class inherits from 'ExternallySourcedRouteStopDetails'. Attributes: start_name (string): a name for the start location start_address (string): an optional street address of the start start_location (string): the location of the start route_add_instructions (string): an optional comment with details about the route", "class_name": "ExternallySourcedRouteStartStopDetails", "detected_licenses": ["MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExternallySourcedRouteStartStopDetails:\n \"\"\"Implementation of the 'Externally Sourced Route Start Stop Details' model. TODO: type model description here. NOTE: This class inherits from 'ExternallySourcedRouteStopDetails'. Attributes: start_name (string): a name for the start location start_address (string): an optional street address of the start start_location (string): the location of the start route_add_instructions (string): an optional comment with details about the route\"\"\"\n\n def __init__(self, start_name=None, start_location=None, stop_name=None, stop_location=None, start_address=None, route_add_instructions=None, stop_address=None, stop_deadline=None):\n \"\"\"Constructor for the ExternallySourcedRouteStartStopDetails class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.start_name = start_name\n self.start_address = start_address\n self.start_location = start_location\n self.route_add_instructions = route_add_instructions\n super(ExternallySourcedRouteStartStopDetails, self).__init__(stop_name, stop_location, stop_address, stop_deadline)\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n start_name = dictionary.get('startName')\n start_location = dictionary.get('startLocation')\n stop_name = dictionary.get('stopName')\n stop_location = dictionary.get('stopLocation')\n start_address = dictionary.get('startAddress')\n route_add_instructions = dictionary.get('routeAddInstructions')\n stop_address = dictionary.get('stopAddress')\n stop_deadline = dictionary.get('stopDeadline')\n return cls(start_name, start_location, stop_name, stop_location, start_address, route_add_instructions, stop_address, stop_deadline)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000082", "length_bytes": 5623, "license_type": "permissive", "methods": [{"docstring": "Constructor for the ExternallySourcedRouteStartStopDetails class", "name": "__init__", "signature": "def __init__(self, start_name=None, start_location=None, stop_name=None, stop_location=None, start_address=None, route_add_instructions=None, stop_address=None, stop_deadline=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041114", "prompt": "Implement the Python class `ExternallySourcedRouteStartStopDetails` described below.\n\nClass description:\nImplementation of the 'Externally Sourced Route Start Stop Details' model. TODO: type model description here. NOTE: This class inherits from 'ExternallySourcedRouteStopDetails'. Attributes: start_name (string): a name for the start location start_address (string): an optional street address of the start start_location (string): the location of the start route_add_instructions (string): an optional comment with details about the route\n\nMethod signatures and docstrings:\n- def __init__(self, start_name=None, start_location=None, stop_name=None, stop_location=None, start_address=None, route_add_instructions=None, stop_address=None, stop_deadline=None): Constructor for the ExternallySourcedRouteStartStopDetails class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `ExternallySourcedRouteStartStopDetails` described below.\n\nClass description:\nImplementation of the 'Externally Sourced Route Start Stop Details' model. TODO: type model description here. NOTE: This class inherits from 'ExternallySourcedRouteStopDetails'. Attributes: start_name (string): a name for the start location start_address (string): an optional street address of the start start_location (string): the location of the start route_add_instructions (string): an optional comment with details about the route\n\nMethod signatures and docstrings:\n- def __init__(self, start_name=None, start_location=None, stop_name=None, stop_location=None, start_address=None, route_add_instructions=None, stop_address=None, stop_deadline=None): Constructor for the ExternallySourcedRouteStartStopDetails class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass ExternallySourcedRouteStartStopDetails:\n \"\"\"Implementation of the 'Externally Sourced Route Start Stop Details' model. TODO: type model description here. NOTE: This class inherits from 'ExternallySourcedRouteStopDetails'. Attributes: start_name (string): a name for the start location start_address (string): an optional street address of the start start_location (string): the location of the start route_add_instructions (string): an optional comment with details about the route\"\"\"\n\n def __init__(self, start_name=None, start_location=None, stop_name=None, stop_location=None, start_address=None, route_add_instructions=None, stop_address=None, stop_deadline=None):\n \"\"\"Constructor for the ExternallySourcedRouteStartStopDetails class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.start_name = start_name\n self.start_address = start_address\n self.start_location = start_location\n self.route_add_instructions = route_add_instructions\n super(ExternallySourcedRouteStartStopDetails, self).__init__(stop_name, stop_location, stop_address, stop_deadline)\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n start_name = dictionary.get('startName')\n start_location = dictionary.get('startLocation')\n stop_name = dictionary.get('stopName')\n stop_location = dictionary.get('stopLocation')\n start_address = dictionary.get('startAddress')\n route_add_instructions = dictionary.get('routeAddInstructions')\n stop_address = dictionary.get('stopAddress')\n stop_deadline = dictionary.get('stopDeadline')\n return cls(start_name, start_location, stop_name, stop_location, start_address, route_add_instructions, stop_address, stop_deadline)\n<|end_body_1|>\n", "revision_id": "729e9391879e273545a4818558677b2e47261f08", "skeleton": "<|skeleton|>\nclass ExternallySourcedRouteStartStopDetails:\n \"\"\"Implementation of the 'Externally Sourced Route Start Stop Details' model. TODO: type model description here. NOTE: This class inherits from 'ExternallySourcedRouteStopDetails'. Attributes: start_name (string): a name for the start location start_address (string): an optional street address of the start start_location (string): the location of the start route_add_instructions (string): an optional comment with details about the route\"\"\"\n\n def __init__(self, start_name=None, start_location=None, stop_name=None, stop_location=None, start_address=None, route_add_instructions=None, stop_address=None, stop_deadline=None):\n \"\"\"Constructor for the ExternallySourcedRouteStartStopDetails class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExternallySourcedRouteStartStopDetails:\n \"\"\"Implementation of the 'Externally Sourced Route Start Stop Details' model. TODO: type model description here. NOTE: This class inherits from 'ExternallySourcedRouteStopDetails'. Attributes: start_name (string): a name for the start location start_address (string): an optional street address of the start start_location (string): the location of the start route_add_instructions (string): an optional comment with details about the route\"\"\"\n\n def __init__(self, start_name=None, start_location=None, stop_name=None, stop_location=None, start_address=None, route_add_instructions=None, stop_address=None, stop_deadline=None):\n \"\"\"Constructor for the ExternallySourcedRouteStartStopDetails class\"\"\"\n self.start_name = start_name\n self.start_address = start_address\n self.start_location = start_location\n self.route_add_instructions = route_add_instructions\n super(ExternallySourcedRouteStartStopDetails, self).__init__(stop_name, stop_location, stop_address, stop_deadline)\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n start_name = dictionary.get('startName')\n start_location = dictionary.get('startLocation')\n stop_name = dictionary.get('stopName')\n stop_location = dictionary.get('stopLocation')\n start_address = dictionary.get('startAddress')\n route_add_instructions = dictionary.get('routeAddInstructions')\n stop_address = dictionary.get('stopAddress')\n stop_deadline = dictionary.get('stopDeadline')\n return cls(start_name, start_location, stop_name, stop_location, start_address, route_add_instructions, stop_address, stop_deadline)\n", "source": "the_stack_v2_python_sparse", "source_path": "sdk/python/v0.1-rc.4/opentelematicsapi/models/externally_sourced_route_stop_details.py", "source_repo": "nmfta-repo/nmfta-opentelematics-prototype", "split": "test", "star_events_count": 2} {"blob_id": "b345a1dcc97a3f503fb4caee618f96f2578beac1", "bodies": ["sigma = 1.0\nsize = 5\nreal_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\nfor real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)", "sigma = 1.0\nsize = 6\nwith self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)"], "bodies_text": "<|body_start_0|>\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "utilsTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass utilsTest:\n\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_0|>\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000083", "length_bytes": 833, "license_type": "no_license", "methods": [{"docstring": "Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`", "name": "testDiscreteGaussian", "signature": "def testDiscreteGaussian(self)"}, {"docstring": "Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`", "name": "testDiscreteGaussianBadSize", "signature": "def testDiscreteGaussianBadSize(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_045309", "prompt": "Implement the Python class `utilsTest` described below.\n\nClass description:\nImplement the utilsTest class.\n\nMethod signatures and docstrings:\n- def testDiscreteGaussian(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\n- def testDiscreteGaussianBadSize(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`", "prompted_full_text": "Implement the Python class `utilsTest` described below.\n\nClass description:\nImplement the utilsTest class.\n\nMethod signatures and docstrings:\n- def testDiscreteGaussian(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\n- def testDiscreteGaussianBadSize(self): Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\n\n<|skeleton|>\nclass utilsTest:\n\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_0|>\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n<|end_body_0|>\n\n<|body_start_1|>\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n<|end_body_1|>\n", "revision_id": "39598b528fec061e828f64a3ded35aebeacb442e", "skeleton": "<|skeleton|>\nclass utilsTest:\n\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_0|>\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class utilsTest:\n def testDiscreteGaussian(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n sigma = 1.0\n size = 5\n real_values = 0.3678 * np.array([0.1357, 0.565, 1.266, 0.565, 0.1357])\n for real, computed in zip(real_values, utils.discrete_gaussian(size, sigma)):\n self.assertAlmostEqual(real, computed, 3)\n\n def testDiscreteGaussianBadSize(self):\n \"\"\"Tests versus values computed using `https://keisan.casio.com/exec/system/1180573473`\"\"\"\n sigma = 1.0\n size = 6\n with self.assertRaisesRegex(ValueError, '`size` must be odd.'):\n utils.discrete_gaussian(size, sigma)\n", "source": "the_stack_v2_python_sparse", "source_path": "simulation/utils_test.py", "source_repo": "chulab/super_resolution", "split": "test", "star_events_count": 3} {"blob_id": "3c1ae498137ca0bb073c5755c2674f823f34626c", "bodies": ["serv_air_quality: Service = self.add_preload_service(SERV_AIR_QUALITY_SENSOR, [CHAR_VOC_DENSITY])\nself.char_quality = serv_air_quality.configure_char(CHAR_AIR_QUALITY, value=0)\nself.char_density = serv_air_quality.configure_char(CHAR_VOC_DENSITY, value=0, properties={PROP_MIN_VALUE: 0, PROP_MAX_VALUE: 5000})", "density = convert_to_float(new_state.state)\nif density is None:\n return\nif self.char_density.value != density:\n self.char_density.set_value(density)\n _LOGGER.debug('%s: Set density to %d', self.entity_id, density)\nair_quality = density_to_air_quality_voc(density)\nif self.char_quality.value != air_quality:\n self.char_quality.set_value(air_quality)\n _LOGGER.debug('%s: Set air_quality to %d', self.entity_id, air_quality)"], "bodies_text": "<|body_start_0|>\n serv_air_quality: Service = self.add_preload_service(SERV_AIR_QUALITY_SENSOR, [CHAR_VOC_DENSITY])\n self.char_quality = serv_air_quality.configure_char(CHAR_AIR_QUALITY, value=0)\n self.char_density = serv_air_quality.configure_char(CHAR_VOC_DENSITY, value=0, properties={PROP_MIN_VALUE: 0, PROP_MAX_VALUE: 5000})\n<|end_body_0|>\n\n<|body_start_1|>\n density = convert_to_float(new_state.state)\n if density is None:\n return\n if self.char_density.value != density:\n self.char_density.set_value(density)\n _LOGGER.debug('%s: Set density to %d', self.entity_id, density)\n air_quality = density_to_air_quality_voc(density)\n if self.char_quality.value != air_quality:\n self.char_quality.set_value(air_quality)\n _LOGGER.debug('%s: Set air_quality to %d', self.entity_id, air_quality)\n<|end_body_1|>\n", "class_docstring": "Generate a VolatileOrganicCompoundsSensor accessory as VOCs sensor. Sensor entity must return VOC in µg/m3.", "class_name": "VolatileOrganicCompoundsSensor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VolatileOrganicCompoundsSensor:\n \"\"\"Generate a VolatileOrganicCompoundsSensor accessory as VOCs sensor. Sensor entity must return VOC in µg/m3.\"\"\"\n\n def create_services(self):\n \"\"\"Override the init function for VOC Sensor.\"\"\"\n <|body_0|>\n\n def async_update_state(self, new_state):\n \"\"\"Update accessory after state change.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n serv_air_quality: Service = self.add_preload_service(SERV_AIR_QUALITY_SENSOR, [CHAR_VOC_DENSITY])\n self.char_quality = serv_air_quality.configure_char(CHAR_AIR_QUALITY, value=0)\n self.char_density = serv_air_quality.configure_char(CHAR_VOC_DENSITY, value=0, properties={PROP_MIN_VALUE: 0, PROP_MAX_VALUE: 5000})\n<|end_body_0|>\n\n<|body_start_1|>\n density = convert_to_float(new_state.state)\n if density is None:\n return\n if self.char_density.value != density:\n self.char_density.set_value(density)\n _LOGGER.debug('%s: Set density to %d', self.entity_id, density)\n air_quality = density_to_air_quality_voc(density)\n if self.char_quality.value != air_quality:\n self.char_quality.set_value(air_quality)\n _LOGGER.debug('%s: Set air_quality to %d', self.entity_id, air_quality)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000084", "length_bytes": 17041, "license_type": "permissive", "methods": [{"docstring": "Override the init function for VOC Sensor.", "name": "create_services", "signature": "def create_services(self)"}, {"docstring": "Update accessory after state change.", "name": "async_update_state", "signature": "def async_update_state(self, new_state)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002195", "prompt": "Implement the Python class `VolatileOrganicCompoundsSensor` described below.\n\nClass description:\nGenerate a VolatileOrganicCompoundsSensor accessory as VOCs sensor. Sensor entity must return VOC in µg/m3.\n\nMethod signatures and docstrings:\n- def create_services(self): Override the init function for VOC Sensor.\n- def async_update_state(self, new_state): Update accessory after state change.", "prompted_full_text": "Implement the Python class `VolatileOrganicCompoundsSensor` described below.\n\nClass description:\nGenerate a VolatileOrganicCompoundsSensor accessory as VOCs sensor. Sensor entity must return VOC in µg/m3.\n\nMethod signatures and docstrings:\n- def create_services(self): Override the init function for VOC Sensor.\n- def async_update_state(self, new_state): Update accessory after state change.\n\n<|skeleton|>\nclass VolatileOrganicCompoundsSensor:\n \"\"\"Generate a VolatileOrganicCompoundsSensor accessory as VOCs sensor. Sensor entity must return VOC in µg/m3.\"\"\"\n\n def create_services(self):\n \"\"\"Override the init function for VOC Sensor.\"\"\"\n <|body_0|>\n\n def async_update_state(self, new_state):\n \"\"\"Update accessory after state change.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n serv_air_quality: Service = self.add_preload_service(SERV_AIR_QUALITY_SENSOR, [CHAR_VOC_DENSITY])\n self.char_quality = serv_air_quality.configure_char(CHAR_AIR_QUALITY, value=0)\n self.char_density = serv_air_quality.configure_char(CHAR_VOC_DENSITY, value=0, properties={PROP_MIN_VALUE: 0, PROP_MAX_VALUE: 5000})\n<|end_body_0|>\n\n<|body_start_1|>\n density = convert_to_float(new_state.state)\n if density is None:\n return\n if self.char_density.value != density:\n self.char_density.set_value(density)\n _LOGGER.debug('%s: Set density to %d', self.entity_id, density)\n air_quality = density_to_air_quality_voc(density)\n if self.char_quality.value != air_quality:\n self.char_quality.set_value(air_quality)\n _LOGGER.debug('%s: Set air_quality to %d', self.entity_id, air_quality)\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass VolatileOrganicCompoundsSensor:\n \"\"\"Generate a VolatileOrganicCompoundsSensor accessory as VOCs sensor. Sensor entity must return VOC in µg/m3.\"\"\"\n\n def create_services(self):\n \"\"\"Override the init function for VOC Sensor.\"\"\"\n <|body_0|>\n\n def async_update_state(self, new_state):\n \"\"\"Update accessory after state change.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VolatileOrganicCompoundsSensor:\n \"\"\"Generate a VolatileOrganicCompoundsSensor accessory as VOCs sensor. Sensor entity must return VOC in µg/m3.\"\"\"\n\n def create_services(self):\n \"\"\"Override the init function for VOC Sensor.\"\"\"\n serv_air_quality: Service = self.add_preload_service(SERV_AIR_QUALITY_SENSOR, [CHAR_VOC_DENSITY])\n self.char_quality = serv_air_quality.configure_char(CHAR_AIR_QUALITY, value=0)\n self.char_density = serv_air_quality.configure_char(CHAR_VOC_DENSITY, value=0, properties={PROP_MIN_VALUE: 0, PROP_MAX_VALUE: 5000})\n\n def async_update_state(self, new_state):\n \"\"\"Update accessory after state change.\"\"\"\n density = convert_to_float(new_state.state)\n if density is None:\n return\n if self.char_density.value != density:\n self.char_density.set_value(density)\n _LOGGER.debug('%s: Set density to %d', self.entity_id, density)\n air_quality = density_to_air_quality_voc(density)\n if self.char_quality.value != air_quality:\n self.char_quality.set_value(air_quality)\n _LOGGER.debug('%s: Set air_quality to %d', self.entity_id, air_quality)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/homekit/type_sensors.py", "source_repo": "home-assistant/core", "split": "test", "star_events_count": 35501} {"blob_id": "a00d5ddef611e1319e745dbe5ae4f910280cd417", "bodies": ["super(VanillaEncoder, self).__init__()\nself.conv1 = PointNetConv2Layer(64, momentum)\nself.conv2 = PointNetConv2Layer(64, momentum)\nself.conv3 = PointNetConv2Layer(64, momentum)\nself.conv4 = PointNetConv2Layer(128, momentum)\nself.conv5 = PointNetConv2Layer(1024, momentum)", "x = tf.expand_dims(inputs, axis=2)\nx = self.conv1(x, training)\nx = self.conv2(x, training)\nx = self.conv3(x, training)\nx = self.conv4(x, training)\nx = self.conv5(x, training)\nx = tf.math.reduce_max(input_tensor=x, axis=1)\nreturn tf.squeeze(x)"], "bodies_text": "<|body_start_0|>\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.expand_dims(inputs, axis=2)\n x = self.conv1(x, training)\n x = self.conv2(x, training)\n x = self.conv3(x, training)\n x = self.conv4(x, training)\n x = self.conv5(x, training)\n x = tf.math.reduce_max(input_tensor=x, axis=1)\n return tf.squeeze(x)\n<|end_body_1|>\n", "class_docstring": "The Vanilla PointNet feature encoder. Consists of five conv2 layers with (64,64,64,128,1024) output channels. Note: PointNetConv2Layer are used instead of tf.keras.layers.Conv2D. https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py", "class_name": "VanillaEncoder", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VanillaEncoder:\n \"\"\"The Vanilla PointNet feature encoder. Consists of five conv2 layers with (64,64,64,128,1024) output channels. Note: PointNetConv2Layer are used instead of tf.keras.layers.Conv2D. https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py\"\"\"\n\n def __init__(self, momentum: float=0.5):\n \"\"\"Constructs a VanillaEncoder keras layer. Args: momentum: the momentum used for the batch normalization layer.\"\"\"\n <|body_0|>\n\n def call(self, inputs: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor:\n \"\"\"Computes the PointNet features. Args: inputs: a dense tensor of size `[B,N,D]`. training: flag to control batch normalization update statistics. Returns: Tensor with shape `[B, N, C=1024]`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.expand_dims(inputs, axis=2)\n x = self.conv1(x, training)\n x = self.conv2(x, training)\n x = self.conv3(x, training)\n x = self.conv4(x, training)\n x = self.conv5(x, training)\n x = tf.math.reduce_max(input_tensor=x, axis=1)\n return tf.squeeze(x)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000085", "length_bytes": 9332, "license_type": "permissive", "methods": [{"docstring": "Constructs a VanillaEncoder keras layer. Args: momentum: the momentum used for the batch normalization layer.", "name": "__init__", "signature": "def __init__(self, momentum: float=0.5)"}, {"docstring": "Computes the PointNet features. Args: inputs: a dense tensor of size `[B,N,D]`. training: flag to control batch normalization update statistics. Returns: Tensor with shape `[B, N, C=1024]`", "name": "call", "signature": "def call(self, inputs: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_014208", "prompt": "Implement the Python class `VanillaEncoder` described below.\n\nClass description:\nThe Vanilla PointNet feature encoder. Consists of five conv2 layers with (64,64,64,128,1024) output channels. Note: PointNetConv2Layer are used instead of tf.keras.layers.Conv2D. https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py\n\nMethod signatures and docstrings:\n- def __init__(self, momentum: float=0.5): Constructs a VanillaEncoder keras layer. Args: momentum: the momentum used for the batch normalization layer.\n- def call(self, inputs: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor: Computes the PointNet features. Args: inputs: a dense tensor of size `[B,N,D]`. training: flag to control batch normalization update statistics. Returns: Tensor with shape `[B, N, C=1024]`", "prompted_full_text": "Implement the Python class `VanillaEncoder` described below.\n\nClass description:\nThe Vanilla PointNet feature encoder. Consists of five conv2 layers with (64,64,64,128,1024) output channels. Note: PointNetConv2Layer are used instead of tf.keras.layers.Conv2D. https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py\n\nMethod signatures and docstrings:\n- def __init__(self, momentum: float=0.5): Constructs a VanillaEncoder keras layer. Args: momentum: the momentum used for the batch normalization layer.\n- def call(self, inputs: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor: Computes the PointNet features. Args: inputs: a dense tensor of size `[B,N,D]`. training: flag to control batch normalization update statistics. Returns: Tensor with shape `[B, N, C=1024]`\n\n<|skeleton|>\nclass VanillaEncoder:\n \"\"\"The Vanilla PointNet feature encoder. Consists of five conv2 layers with (64,64,64,128,1024) output channels. Note: PointNetConv2Layer are used instead of tf.keras.layers.Conv2D. https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py\"\"\"\n\n def __init__(self, momentum: float=0.5):\n \"\"\"Constructs a VanillaEncoder keras layer. Args: momentum: the momentum used for the batch normalization layer.\"\"\"\n <|body_0|>\n\n def call(self, inputs: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor:\n \"\"\"Computes the PointNet features. Args: inputs: a dense tensor of size `[B,N,D]`. training: flag to control batch normalization update statistics. Returns: Tensor with shape `[B, N, C=1024]`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.expand_dims(inputs, axis=2)\n x = self.conv1(x, training)\n x = self.conv2(x, training)\n x = self.conv3(x, training)\n x = self.conv4(x, training)\n x = self.conv5(x, training)\n x = tf.math.reduce_max(input_tensor=x, axis=1)\n return tf.squeeze(x)\n<|end_body_1|>\n", "revision_id": "1b0203eb538f2b6a1013ec7736d0d548416f059a", "skeleton": "<|skeleton|>\nclass VanillaEncoder:\n \"\"\"The Vanilla PointNet feature encoder. Consists of five conv2 layers with (64,64,64,128,1024) output channels. Note: PointNetConv2Layer are used instead of tf.keras.layers.Conv2D. https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py\"\"\"\n\n def __init__(self, momentum: float=0.5):\n \"\"\"Constructs a VanillaEncoder keras layer. Args: momentum: the momentum used for the batch normalization layer.\"\"\"\n <|body_0|>\n\n def call(self, inputs: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor:\n \"\"\"Computes the PointNet features. Args: inputs: a dense tensor of size `[B,N,D]`. training: flag to control batch normalization update statistics. Returns: Tensor with shape `[B, N, C=1024]`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VanillaEncoder:\n \"\"\"The Vanilla PointNet feature encoder. Consists of five conv2 layers with (64,64,64,128,1024) output channels. Note: PointNetConv2Layer are used instead of tf.keras.layers.Conv2D. https://github.com/charlesq34/pointnet/blob/master/models/pointnet_cls_basic.py\"\"\"\n\n def __init__(self, momentum: float=0.5):\n \"\"\"Constructs a VanillaEncoder keras layer. Args: momentum: the momentum used for the batch normalization layer.\"\"\"\n super(VanillaEncoder, self).__init__()\n self.conv1 = PointNetConv2Layer(64, momentum)\n self.conv2 = PointNetConv2Layer(64, momentum)\n self.conv3 = PointNetConv2Layer(64, momentum)\n self.conv4 = PointNetConv2Layer(128, momentum)\n self.conv5 = PointNetConv2Layer(1024, momentum)\n\n def call(self, inputs: tf.Tensor, training: Optional[bool]=None) -> tf.Tensor:\n \"\"\"Computes the PointNet features. Args: inputs: a dense tensor of size `[B,N,D]`. training: flag to control batch normalization update statistics. Returns: Tensor with shape `[B, N, C=1024]`\"\"\"\n x = tf.expand_dims(inputs, axis=2)\n x = self.conv1(x, training)\n x = self.conv2(x, training)\n x = self.conv3(x, training)\n x = self.conv4(x, training)\n x = self.conv5(x, training)\n x = tf.math.reduce_max(input_tensor=x, axis=1)\n return tf.squeeze(x)\n", "source": "the_stack_v2_python_sparse", "source_path": "tensorflow_graphics/nn/layer/pointnet.py", "source_repo": "tensorflow/graphics", "split": "test", "star_events_count": 2920} {"blob_id": "b063d4dfe47fe719c4d925b7a314d75885145a30", "bodies": ["self.timer = 0\nself.tweets = collections.defaultdict(list)\nself.followees = collections.defaultdict(set)\nself.max_tweets = 10", "self.timer += 1\nif len(self.tweets[userId]) == self.max_tweets:\n self.tweets[userId].pop(0)\nself.tweets[userId].append((self.timer, tweetId))", "ts = [i for i in self.tweets[userId]]\nfor uid in self.followees[userId]:\n ts.extend(self.tweets[uid])\nts.sort(key=lambda x: x[0], reverse=True)\nres = []\nfor i in range(min(self.max_tweets, len(ts))):\n res.append(ts[i][1])\nreturn res", "if followerId == followeeId:\n return\nself.followees[followerId].add(followeeId)", "if followerId == followeeId:\n return\nself.followees[followerId].discard(followeeId)"], "bodies_text": "<|body_start_0|>\n self.timer = 0\n self.tweets = collections.defaultdict(list)\n self.followees = collections.defaultdict(set)\n self.max_tweets = 10\n<|end_body_0|>\n\n<|body_start_1|>\n self.timer += 1\n if len(self.tweets[userId]) == self.max_tweets:\n self.tweets[userId].pop(0)\n self.tweets[userId].append((self.timer, tweetId))\n<|end_body_1|>\n\n<|body_start_2|>\n ts = [i for i in self.tweets[userId]]\n for uid in self.followees[userId]:\n ts.extend(self.tweets[uid])\n ts.sort(key=lambda x: x[0], reverse=True)\n res = []\n for i in range(min(self.max_tweets, len(ts))):\n res.append(ts[i][1])\n return res\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId == followeeId:\n return\n self.followees[followerId].add(followeeId)\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId == followeeId:\n return\n self.followees[followerId].discard(followeeId)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Twitter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.timer = 0\n self.tweets = collections.defaultdict(list)\n self.followees = collections.defaultdict(set)\n self.max_tweets = 10\n<|end_body_0|>\n\n<|body_start_1|>\n self.timer += 1\n if len(self.tweets[userId]) == self.max_tweets:\n self.tweets[userId].pop(0)\n self.tweets[userId].append((self.timer, tweetId))\n<|end_body_1|>\n\n<|body_start_2|>\n ts = [i for i in self.tweets[userId]]\n for uid in self.followees[userId]:\n ts.extend(self.tweets[uid])\n ts.sort(key=lambda x: x[0], reverse=True)\n res = []\n for i in range(min(self.max_tweets, len(ts))):\n res.append(ts[i][1])\n return res\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId == followeeId:\n return\n self.followees[followerId].add(followeeId)\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId == followeeId:\n return\n self.followees[followerId].discard(followeeId)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000086", "length_bytes": 3829, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Compose a new tweet.", "name": "postTweet", "signature": "def postTweet(self, userId: int, tweetId: int) -> None"}, {"docstring": "Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.", "name": "getNewsFeed", "signature": "def getNewsFeed(self, userId: int) -> List[int]"}, {"docstring": "Follower follows a followee. If the operation is invalid, it should be a no-op.", "name": "follow", "signature": "def follow(self, followerId: int, followeeId: int) -> None"}, {"docstring": "Follower unfollows a followee. If the operation is invalid, it should be a no-op.", "name": "unfollow", "signature": "def unfollow(self, followerId: int, followeeId: int) -> None"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_005014", "prompt": "Implement the Python class `Twitter` described below.\n\nClass description:\nImplement the Twitter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def postTweet(self, userId: int, tweetId: int) -> None: Compose a new tweet.\n- def getNewsFeed(self, userId: int) -> List[int]: Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\n- def follow(self, followerId: int, followeeId: int) -> None: Follower follows a followee. If the operation is invalid, it should be a no-op.\n- def unfollow(self, followerId: int, followeeId: int) -> None: Follower unfollows a followee. If the operation is invalid, it should be a no-op.", "prompted_full_text": "Implement the Python class `Twitter` described below.\n\nClass description:\nImplement the Twitter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def postTweet(self, userId: int, tweetId: int) -> None: Compose a new tweet.\n- def getNewsFeed(self, userId: int) -> List[int]: Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\n- def follow(self, followerId: int, followeeId: int) -> None: Follower follows a followee. If the operation is invalid, it should be a no-op.\n- def unfollow(self, followerId: int, followeeId: int) -> None: Follower unfollows a followee. If the operation is invalid, it should be a no-op.\n\n<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.timer = 0\n self.tweets = collections.defaultdict(list)\n self.followees = collections.defaultdict(set)\n self.max_tweets = 10\n<|end_body_0|>\n\n<|body_start_1|>\n self.timer += 1\n if len(self.tweets[userId]) == self.max_tweets:\n self.tweets[userId].pop(0)\n self.tweets[userId].append((self.timer, tweetId))\n<|end_body_1|>\n\n<|body_start_2|>\n ts = [i for i in self.tweets[userId]]\n for uid in self.followees[userId]:\n ts.extend(self.tweets[uid])\n ts.sort(key=lambda x: x[0], reverse=True)\n res = []\n for i in range(min(self.max_tweets, len(ts))):\n res.append(ts[i][1])\n return res\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId == followeeId:\n return\n self.followees[followerId].add(followeeId)\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId == followeeId:\n return\n self.followees[followerId].discard(followeeId)\n<|end_body_4|>\n", "revision_id": "a390adeeb71e997b3c1a56c479825d4adda07ef9", "skeleton": "<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Twitter:\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n self.timer = 0\n self.tweets = collections.defaultdict(list)\n self.followees = collections.defaultdict(set)\n self.max_tweets = 10\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n self.timer += 1\n if len(self.tweets[userId]) == self.max_tweets:\n self.tweets[userId].pop(0)\n self.tweets[userId].append((self.timer, tweetId))\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n ts = [i for i in self.tweets[userId]]\n for uid in self.followees[userId]:\n ts.extend(self.tweets[uid])\n ts.sort(key=lambda x: x[0], reverse=True)\n res = []\n for i in range(min(self.max_tweets, len(ts))):\n res.append(ts[i][1])\n return res\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n if followerId == followeeId:\n return\n self.followees[followerId].add(followeeId)\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n if followerId == followeeId:\n return\n self.followees[followerId].discard(followeeId)\n", "source": "the_stack_v2_python_sparse", "source_path": "algorithms/python/designTwitter/designTwitter.py", "source_repo": "MichelleZ/leetcode", "split": "test", "star_events_count": 3} {"blob_id": "5740fe675b1763237df55ffb8ac5d440d248d054", "bodies": ["row, col = ([0] * n, [0] * n)\ndiag, anti_diag = ([0] * (2 * n), [0] * (2 * n))\n\ndef dfs(x, y, s, n, path, res):\n if y == n:\n x += 1\n y = 0\n if x == n:\n if s == n:\n res.append(copy.deepcopy(path))\n return\n if not row[x] and (not col[y]) and (not diag[x + y]) and (not anti_diag[n - 1 - x + y]):\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 1\n path[x][y] = 'Q'\n dfs(x, y + 1, s + 1, n, path, res)\n path[x][y] = '.'\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 0\n dfs(x, y + 1, s, n, path, res)\nres = []\npath = [['.'] * n for _ in range(n)]\ndfs(0, 0, 0, n, path, res)\nreturn [[''.join(y) for y in x] for x in res]", "ans = []\nqueens = [-1] * n\ncolumns = [True] * n + [False]\nback = [True] * n * 2\nforward = [True] * n * 2\nrow = col = 0\nwhile True:\n if columns[col] and back[col - row + n] and forward[col + row]:\n queens[row] = col\n columns[col] = back[col - row + n] = forward[col + row] = False\n row += 1\n col = 0\n if row == n:\n ans.append(['.' * q + 'Q' + '.' * (n - q - 1) for q in queens])\n else:\n if row == n or col == n:\n if row == 0:\n return ans\n row -= 1\n col = queens[row]\n columns[col] = back[col - row + n] = forward[col + row] = True\n col += 1"], "bodies_text": "<|body_start_0|>\n row, col = ([0] * n, [0] * n)\n diag, anti_diag = ([0] * (2 * n), [0] * (2 * n))\n\n def dfs(x, y, s, n, path, res):\n if y == n:\n x += 1\n y = 0\n if x == n:\n if s == n:\n res.append(copy.deepcopy(path))\n return\n if not row[x] and (not col[y]) and (not diag[x + y]) and (not anti_diag[n - 1 - x + y]):\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 1\n path[x][y] = 'Q'\n dfs(x, y + 1, s + 1, n, path, res)\n path[x][y] = '.'\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 0\n dfs(x, y + 1, s, n, path, res)\n res = []\n path = [['.'] * n for _ in range(n)]\n dfs(0, 0, 0, n, path, res)\n return [[''.join(y) for y in x] for x in res]\n<|end_body_0|>\n\n<|body_start_1|>\n ans = []\n queens = [-1] * n\n columns = [True] * n + [False]\n back = [True] * n * 2\n forward = [True] * n * 2\n row = col = 0\n while True:\n if columns[col] and back[col - row + n] and forward[col + row]:\n queens[row] = col\n columns[col] = back[col - row + n] = forward[col + row] = False\n row += 1\n col = 0\n if row == n:\n ans.append(['.' * q + 'Q' + '.' * (n - q - 1) for q in queens])\n else:\n if row == n or col == n:\n if row == 0:\n return ans\n row -= 1\n col = queens[row]\n columns[col] = back[col - row + n] = forward[col + row] = True\n col += 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def solveNQueens(self, n):\n \"\"\"回溯 :type n: int :rtype: List[List[str]]\"\"\"\n <|body_0|>\n\n def solveNQueens2(self, n):\n \"\"\"https://leetcode.com/problems/n-queens/discuss/19937/AC-Python-76-ms-iterative-backtracking-solution :param n: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n row, col = ([0] * n, [0] * n)\n diag, anti_diag = ([0] * (2 * n), [0] * (2 * n))\n\n def dfs(x, y, s, n, path, res):\n if y == n:\n x += 1\n y = 0\n if x == n:\n if s == n:\n res.append(copy.deepcopy(path))\n return\n if not row[x] and (not col[y]) and (not diag[x + y]) and (not anti_diag[n - 1 - x + y]):\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 1\n path[x][y] = 'Q'\n dfs(x, y + 1, s + 1, n, path, res)\n path[x][y] = '.'\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 0\n dfs(x, y + 1, s, n, path, res)\n res = []\n path = [['.'] * n for _ in range(n)]\n dfs(0, 0, 0, n, path, res)\n return [[''.join(y) for y in x] for x in res]\n<|end_body_0|>\n\n<|body_start_1|>\n ans = []\n queens = [-1] * n\n columns = [True] * n + [False]\n back = [True] * n * 2\n forward = [True] * n * 2\n row = col = 0\n while True:\n if columns[col] and back[col - row + n] and forward[col + row]:\n queens[row] = col\n columns[col] = back[col - row + n] = forward[col + row] = False\n row += 1\n col = 0\n if row == n:\n ans.append(['.' * q + 'Q' + '.' * (n - q - 1) for q in queens])\n else:\n if row == n or col == n:\n if row == 0:\n return ans\n row -= 1\n col = queens[row]\n columns[col] = back[col - row + n] = forward[col + row] = True\n col += 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000087", "length_bytes": 3466, "license_type": "no_license", "methods": [{"docstring": "回溯 :type n: int :rtype: List[List[str]]", "name": "solveNQueens", "signature": "def solveNQueens(self, n)"}, {"docstring": "https://leetcode.com/problems/n-queens/discuss/19937/AC-Python-76-ms-iterative-backtracking-solution :param n: :return:", "name": "solveNQueens2", "signature": "def solveNQueens2(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034795", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def solveNQueens(self, n): 回溯 :type n: int :rtype: List[List[str]]\n- def solveNQueens2(self, n): https://leetcode.com/problems/n-queens/discuss/19937/AC-Python-76-ms-iterative-backtracking-solution :param n: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def solveNQueens(self, n): 回溯 :type n: int :rtype: List[List[str]]\n- def solveNQueens2(self, n): https://leetcode.com/problems/n-queens/discuss/19937/AC-Python-76-ms-iterative-backtracking-solution :param n: :return:\n\n<|skeleton|>\nclass Solution:\n\n def solveNQueens(self, n):\n \"\"\"回溯 :type n: int :rtype: List[List[str]]\"\"\"\n <|body_0|>\n\n def solveNQueens2(self, n):\n \"\"\"https://leetcode.com/problems/n-queens/discuss/19937/AC-Python-76-ms-iterative-backtracking-solution :param n: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n row, col = ([0] * n, [0] * n)\n diag, anti_diag = ([0] * (2 * n), [0] * (2 * n))\n\n def dfs(x, y, s, n, path, res):\n if y == n:\n x += 1\n y = 0\n if x == n:\n if s == n:\n res.append(copy.deepcopy(path))\n return\n if not row[x] and (not col[y]) and (not diag[x + y]) and (not anti_diag[n - 1 - x + y]):\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 1\n path[x][y] = 'Q'\n dfs(x, y + 1, s + 1, n, path, res)\n path[x][y] = '.'\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 0\n dfs(x, y + 1, s, n, path, res)\n res = []\n path = [['.'] * n for _ in range(n)]\n dfs(0, 0, 0, n, path, res)\n return [[''.join(y) for y in x] for x in res]\n<|end_body_0|>\n\n<|body_start_1|>\n ans = []\n queens = [-1] * n\n columns = [True] * n + [False]\n back = [True] * n * 2\n forward = [True] * n * 2\n row = col = 0\n while True:\n if columns[col] and back[col - row + n] and forward[col + row]:\n queens[row] = col\n columns[col] = back[col - row + n] = forward[col + row] = False\n row += 1\n col = 0\n if row == n:\n ans.append(['.' * q + 'Q' + '.' * (n - q - 1) for q in queens])\n else:\n if row == n or col == n:\n if row == 0:\n return ans\n row -= 1\n col = queens[row]\n columns[col] = back[col - row + n] = forward[col + row] = True\n col += 1\n<|end_body_1|>\n", "revision_id": "5d3574ccd282d0146c83c286ae28d8baaabd4910", "skeleton": "<|skeleton|>\nclass Solution:\n\n def solveNQueens(self, n):\n \"\"\"回溯 :type n: int :rtype: List[List[str]]\"\"\"\n <|body_0|>\n\n def solveNQueens2(self, n):\n \"\"\"https://leetcode.com/problems/n-queens/discuss/19937/AC-Python-76-ms-iterative-backtracking-solution :param n: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def solveNQueens(self, n):\n \"\"\"回溯 :type n: int :rtype: List[List[str]]\"\"\"\n row, col = ([0] * n, [0] * n)\n diag, anti_diag = ([0] * (2 * n), [0] * (2 * n))\n\n def dfs(x, y, s, n, path, res):\n if y == n:\n x += 1\n y = 0\n if x == n:\n if s == n:\n res.append(copy.deepcopy(path))\n return\n if not row[x] and (not col[y]) and (not diag[x + y]) and (not anti_diag[n - 1 - x + y]):\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 1\n path[x][y] = 'Q'\n dfs(x, y + 1, s + 1, n, path, res)\n path[x][y] = '.'\n row[x] = col[y] = diag[x + y] = anti_diag[n - 1 - x + y] = 0\n dfs(x, y + 1, s, n, path, res)\n res = []\n path = [['.'] * n for _ in range(n)]\n dfs(0, 0, 0, n, path, res)\n return [[''.join(y) for y in x] for x in res]\n\n def solveNQueens2(self, n):\n \"\"\"https://leetcode.com/problems/n-queens/discuss/19937/AC-Python-76-ms-iterative-backtracking-solution :param n: :return:\"\"\"\n ans = []\n queens = [-1] * n\n columns = [True] * n + [False]\n back = [True] * n * 2\n forward = [True] * n * 2\n row = col = 0\n while True:\n if columns[col] and back[col - row + n] and forward[col + row]:\n queens[row] = col\n columns[col] = back[col - row + n] = forward[col + row] = False\n row += 1\n col = 0\n if row == n:\n ans.append(['.' * q + 'Q' + '.' * (n - q - 1) for q in queens])\n else:\n if row == n or col == n:\n if row == 0:\n return ans\n row -= 1\n col = queens[row]\n columns[col] = back[col - row + n] = forward[col + row] = True\n col += 1\n", "source": "the_stack_v2_python_sparse", "source_path": "51_N皇后.py", "source_repo": "lovehhf/LeetCode", "split": "test", "star_events_count": 0} {"blob_id": "4ae1bfd0f4ffa051b92d3188993bb0d667edc03a", "bodies": ["info = {}\ntry:\n if obj.teacher:\n info['teacher'] = obj.teacher.pen_name\nexcept Sensei.DoesNotExist as e:\n info['teacher'] = str(e)\ntry:\n info_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n info_problems[value.pk] = value.get_data()\n info['problems'] = info_problems\nexcept ProblemInstance.DoesNotExist as e:\n info['problems'] = str(e)\nreturn info", "link = OrderedDict({})\ntry:\n if obj.teacher:\n link['teacher'] = urljoin(reverse('v1:classroom:sensei-list', request=self.context['request']), str(obj.teacher.pk))\nexcept Sensei.DoesNotExist as e:\n link['teacher'] = str(e)\ntry:\n link_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n link_problems[value.pk] = urljoin(reverse('v1:problem:problem-instance-list', request=self.context['request']), str(value.pk))\n link['problems'] = link_problems\nexcept ProblemInstance.DoesNotExist as e:\n link['problems'] = str(e)\nreturn link"], "bodies_text": "<|body_start_0|>\n info = {}\n try:\n if obj.teacher:\n info['teacher'] = obj.teacher.pen_name\n except Sensei.DoesNotExist as e:\n info['teacher'] = str(e)\n try:\n info_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n info_problems[value.pk] = value.get_data()\n info['problems'] = info_problems\n except ProblemInstance.DoesNotExist as e:\n info['problems'] = str(e)\n return info\n<|end_body_0|>\n\n<|body_start_1|>\n link = OrderedDict({})\n try:\n if obj.teacher:\n link['teacher'] = urljoin(reverse('v1:classroom:sensei-list', request=self.context['request']), str(obj.teacher.pk))\n except Sensei.DoesNotExist as e:\n link['teacher'] = str(e)\n try:\n link_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n link_problems[value.pk] = urljoin(reverse('v1:problem:problem-instance-list', request=self.context['request']), str(value.pk))\n link['problems'] = link_problems\n except ProblemInstance.DoesNotExist as e:\n link['problems'] = str(e)\n return link\n<|end_body_1|>\n", "class_docstring": "Serialize the Exam Problem with link and info.", "class_name": "ExamProblemsSerializers", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExamProblemsSerializers:\n \"\"\"Serialize the Exam Problem with link and info.\"\"\"\n\n def get_info_data(self, obj, *args, **kwargs):\n \"\"\"Get information data. :param obj: :param args: :param kwargs: :return:\"\"\"\n <|body_0|>\n\n def get_links_url(self, obj, *args, **kwargs):\n \"\"\"Get link url :param obj: :param args: :param kwargs: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n info = {}\n try:\n if obj.teacher:\n info['teacher'] = obj.teacher.pen_name\n except Sensei.DoesNotExist as e:\n info['teacher'] = str(e)\n try:\n info_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n info_problems[value.pk] = value.get_data()\n info['problems'] = info_problems\n except ProblemInstance.DoesNotExist as e:\n info['problems'] = str(e)\n return info\n<|end_body_0|>\n\n<|body_start_1|>\n link = OrderedDict({})\n try:\n if obj.teacher:\n link['teacher'] = urljoin(reverse('v1:classroom:sensei-list', request=self.context['request']), str(obj.teacher.pk))\n except Sensei.DoesNotExist as e:\n link['teacher'] = str(e)\n try:\n link_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n link_problems[value.pk] = urljoin(reverse('v1:problem:problem-instance-list', request=self.context['request']), str(value.pk))\n link['problems'] = link_problems\n except ProblemInstance.DoesNotExist as e:\n link['problems'] = str(e)\n return link\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000088", "length_bytes": 7433, "license_type": "no_license", "methods": [{"docstring": "Get information data. :param obj: :param args: :param kwargs: :return:", "name": "get_info_data", "signature": "def get_info_data(self, obj, *args, **kwargs)"}, {"docstring": "Get link url :param obj: :param args: :param kwargs: :return:", "name": "get_links_url", "signature": "def get_links_url(self, obj, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_023405", "prompt": "Implement the Python class `ExamProblemsSerializers` described below.\n\nClass description:\nSerialize the Exam Problem with link and info.\n\nMethod signatures and docstrings:\n- def get_info_data(self, obj, *args, **kwargs): Get information data. :param obj: :param args: :param kwargs: :return:\n- def get_links_url(self, obj, *args, **kwargs): Get link url :param obj: :param args: :param kwargs: :return:", "prompted_full_text": "Implement the Python class `ExamProblemsSerializers` described below.\n\nClass description:\nSerialize the Exam Problem with link and info.\n\nMethod signatures and docstrings:\n- def get_info_data(self, obj, *args, **kwargs): Get information data. :param obj: :param args: :param kwargs: :return:\n- def get_links_url(self, obj, *args, **kwargs): Get link url :param obj: :param args: :param kwargs: :return:\n\n<|skeleton|>\nclass ExamProblemsSerializers:\n \"\"\"Serialize the Exam Problem with link and info.\"\"\"\n\n def get_info_data(self, obj, *args, **kwargs):\n \"\"\"Get information data. :param obj: :param args: :param kwargs: :return:\"\"\"\n <|body_0|>\n\n def get_links_url(self, obj, *args, **kwargs):\n \"\"\"Get link url :param obj: :param args: :param kwargs: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n info = {}\n try:\n if obj.teacher:\n info['teacher'] = obj.teacher.pen_name\n except Sensei.DoesNotExist as e:\n info['teacher'] = str(e)\n try:\n info_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n info_problems[value.pk] = value.get_data()\n info['problems'] = info_problems\n except ProblemInstance.DoesNotExist as e:\n info['problems'] = str(e)\n return info\n<|end_body_0|>\n\n<|body_start_1|>\n link = OrderedDict({})\n try:\n if obj.teacher:\n link['teacher'] = urljoin(reverse('v1:classroom:sensei-list', request=self.context['request']), str(obj.teacher.pk))\n except Sensei.DoesNotExist as e:\n link['teacher'] = str(e)\n try:\n link_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n link_problems[value.pk] = urljoin(reverse('v1:problem:problem-instance-list', request=self.context['request']), str(value.pk))\n link['problems'] = link_problems\n except ProblemInstance.DoesNotExist as e:\n link['problems'] = str(e)\n return link\n<|end_body_1|>\n", "revision_id": "acd31a2f43d7ea83fc9bb34627f5dca94763eade", "skeleton": "<|skeleton|>\nclass ExamProblemsSerializers:\n \"\"\"Serialize the Exam Problem with link and info.\"\"\"\n\n def get_info_data(self, obj, *args, **kwargs):\n \"\"\"Get information data. :param obj: :param args: :param kwargs: :return:\"\"\"\n <|body_0|>\n\n def get_links_url(self, obj, *args, **kwargs):\n \"\"\"Get link url :param obj: :param args: :param kwargs: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExamProblemsSerializers:\n \"\"\"Serialize the Exam Problem with link and info.\"\"\"\n\n def get_info_data(self, obj, *args, **kwargs):\n \"\"\"Get information data. :param obj: :param args: :param kwargs: :return:\"\"\"\n info = {}\n try:\n if obj.teacher:\n info['teacher'] = obj.teacher.pen_name\n except Sensei.DoesNotExist as e:\n info['teacher'] = str(e)\n try:\n info_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n info_problems[value.pk] = value.get_data()\n info['problems'] = info_problems\n except ProblemInstance.DoesNotExist as e:\n info['problems'] = str(e)\n return info\n\n def get_links_url(self, obj, *args, **kwargs):\n \"\"\"Get link url :param obj: :param args: :param kwargs: :return:\"\"\"\n link = OrderedDict({})\n try:\n if obj.teacher:\n link['teacher'] = urljoin(reverse('v1:classroom:sensei-list', request=self.context['request']), str(obj.teacher.pk))\n except Sensei.DoesNotExist as e:\n link['teacher'] = str(e)\n try:\n link_problems = OrderedDict({})\n for index, value in enumerate(obj.problems.all()):\n link_problems[value.pk] = urljoin(reverse('v1:problem:problem-instance-list', request=self.context['request']), str(value.pk))\n link['problems'] = link_problems\n except ProblemInstance.DoesNotExist as e:\n link['problems'] = str(e)\n return link\n", "source": "the_stack_v2_python_sparse", "source_path": "classroom/serializers.py", "source_repo": "JoenyBui/mywaterbuffalo", "split": "test", "star_events_count": 0} {"blob_id": "c4d71726925b730ae21e18e16d7e4f1d8e6cb5c8", "bodies": ["self.ensure_one()\nfor service in self.services:\n if service.default:\n return service\nraise exceptions.Warning(_('Service error'), _(\"API doesn't have default service\"))", "for carrier_api in self:\n test = getattr(self, 'test_%s' % self.method)\n test()"], "bodies_text": "<|body_start_0|>\n self.ensure_one()\n for service in self.services:\n if service.default:\n return service\n raise exceptions.Warning(_('Service error'), _(\"API doesn't have default service\"))\n<|end_body_0|>\n\n<|body_start_1|>\n for carrier_api in self:\n test = getattr(self, 'test_%s' % self.method)\n test()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "CarrierApi", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CarrierApi:\n\n def get_default_carrier_service(self):\n \"\"\"Get default service API\"\"\"\n <|body_0|>\n\n def test_connection(self):\n \"\"\"Test connection Carrier API - Webservices Call method test_methodname\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ensure_one()\n for service in self.services:\n if service.default:\n return service\n raise exceptions.Warning(_('Service error'), _(\"API doesn't have default service\"))\n<|end_body_0|>\n\n<|body_start_1|>\n for carrier_api in self:\n test = getattr(self, 'test_%s' % self.method)\n test()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000089", "length_bytes": 2293, "license_type": "no_license", "methods": [{"docstring": "Get default service API", "name": "get_default_carrier_service", "signature": "def get_default_carrier_service(self)"}, {"docstring": "Test connection Carrier API - Webservices Call method test_methodname", "name": "test_connection", "signature": "def test_connection(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034237", "prompt": "Implement the Python class `CarrierApi` described below.\n\nClass description:\nImplement the CarrierApi class.\n\nMethod signatures and docstrings:\n- def get_default_carrier_service(self): Get default service API\n- def test_connection(self): Test connection Carrier API - Webservices Call method test_methodname", "prompted_full_text": "Implement the Python class `CarrierApi` described below.\n\nClass description:\nImplement the CarrierApi class.\n\nMethod signatures and docstrings:\n- def get_default_carrier_service(self): Get default service API\n- def test_connection(self): Test connection Carrier API - Webservices Call method test_methodname\n\n<|skeleton|>\nclass CarrierApi:\n\n def get_default_carrier_service(self):\n \"\"\"Get default service API\"\"\"\n <|body_0|>\n\n def test_connection(self):\n \"\"\"Test connection Carrier API - Webservices Call method test_methodname\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ensure_one()\n for service in self.services:\n if service.default:\n return service\n raise exceptions.Warning(_('Service error'), _(\"API doesn't have default service\"))\n<|end_body_0|>\n\n<|body_start_1|>\n for carrier_api in self:\n test = getattr(self, 'test_%s' % self.method)\n test()\n<|end_body_1|>\n", "revision_id": "636bc89a84f12c163c31204bc19f4e0271546017", "skeleton": "<|skeleton|>\nclass CarrierApi:\n\n def get_default_carrier_service(self):\n \"\"\"Get default service API\"\"\"\n <|body_0|>\n\n def test_connection(self):\n \"\"\"Test connection Carrier API - Webservices Call method test_methodname\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CarrierApi:\n def get_default_carrier_service(self):\n \"\"\"Get default service API\"\"\"\n self.ensure_one()\n for service in self.services:\n if service.default:\n return service\n raise exceptions.Warning(_('Service error'), _(\"API doesn't have default service\"))\n\n def test_connection(self):\n \"\"\"Test connection Carrier API - Webservices Call method test_methodname\"\"\"\n for carrier_api in self:\n test = getattr(self, 'test_%s' % self.method)\n test()\n", "source": "the_stack_v2_python_sparse", "source_path": "project-addons/carrier_api/models/carrier_api.py", "source_repo": "Comunitea/CMNT_00056_2016_BT", "split": "test", "star_events_count": 0} {"blob_id": "fe94c2555678e570370a0f17b0475316cca6a056", "bodies": ["self.body = body\nchains = list(body.split_by_chain())\nself.start_of_chain = {i + 1: sum((len(c) for c in chains[:i])) for i in range(len(chains))}\nself.end_of_chain = {i + 1: sum((len(c) for c in chains[:i + 1])) for i in range(len(chains))}\nself.start_of_chain[None] = 0\nself.chains = {i + 1: c for i, c in enumerate(chains)}\nself.bodyid = bodyid\nif callable(sites):\n sites = sites(body)\nif isinstance(sites, SpliceSite):\n sites = [sites]\nself.sites = list(sites)\nfor i, site in enumerate(self.sites):\n if isinstance(site, str):\n raise ValueError('site currently must be (sele, polarity)')\n if not isinstance(site, SpliceSite):\n if isinstance(site, dict):\n self.sites[i] = SpliceSite(**site)\n else:\n if not isinstance(site, Iterable):\n self.sites[i] = (site,)\n self.sites[i] = SpliceSite(*site)\nself.nsite = dict(N=0, C=0)\nfor s in self.sites:\n self.nsite[s.polarity] += 1\nself.min_seg_len = min_seg_len\nself._resids_list = [site._resids(self) for site in self.sites]\nself._len_body = len(body)\nself._chains = np.array([body.chain(i + 1) for i in range(len(body))])\nself.allowed_pairs = allowed_pairs", "if isite < 0:\n return [None]\nreturn self._resids_list[isite]", "resid_subset = set()\nfor i in range(len(self.sites)):\n resid_subset |= set(self._resids_list[i])\nresid_subset = np.array(list(resid_subset))\nN = self._len_body + 1\nval, idx = np.where(0 == np.arange(N)[np.newaxis, :] - resid_subset[:, np.newaxis])\nto_subset = np.array(N * [-1])\nto_subset[idx] = val\nassert np.all(to_subset[resid_subset] == np.arange(len(resid_subset)))\nreturn (resid_subset, to_subset)", "if ires < 0 or jres < 0:\n return True\nassert 0 < ires <= self._len_body and 0 < jres <= self._len_body\nichain, jchain = (self._chains[ires - 1], self._chains[jres - 1])\nif ichain == jchain:\n ipol = self.sites[isite].polarity\n jpol = self.sites[jsite].polarity\n if ipol == jpol:\n return False\n if ipol == 'N':\n seglen = jres - ires + 1\n else:\n seglen = ires - jres + 1\n if seglen < self.min_seg_len:\n return False\nreturn True", "if isite == jsite:\n return False\nif isite < 0 or jsite < 0:\n return True\nif self.allowed_pairs is not None and (isite, jsite) not in self.allowed_pairs:\n return False\nreturn True", "sites = str([(s._resids(self), s.polarity) for s in self.sites])\nif len(sites) > 30:\n sites = sites[:30] + '...'\nreturn 'Spliceable: body=(' + str(self._len_body) + ',' + str(self.body).splitlines()[0].split('/')[-1] + '), sites=' + sites"], "bodies_text": "<|body_start_0|>\n self.body = body\n chains = list(body.split_by_chain())\n self.start_of_chain = {i + 1: sum((len(c) for c in chains[:i])) for i in range(len(chains))}\n self.end_of_chain = {i + 1: sum((len(c) for c in chains[:i + 1])) for i in range(len(chains))}\n self.start_of_chain[None] = 0\n self.chains = {i + 1: c for i, c in enumerate(chains)}\n self.bodyid = bodyid\n if callable(sites):\n sites = sites(body)\n if isinstance(sites, SpliceSite):\n sites = [sites]\n self.sites = list(sites)\n for i, site in enumerate(self.sites):\n if isinstance(site, str):\n raise ValueError('site currently must be (sele, polarity)')\n if not isinstance(site, SpliceSite):\n if isinstance(site, dict):\n self.sites[i] = SpliceSite(**site)\n else:\n if not isinstance(site, Iterable):\n self.sites[i] = (site,)\n self.sites[i] = SpliceSite(*site)\n self.nsite = dict(N=0, C=0)\n for s in self.sites:\n self.nsite[s.polarity] += 1\n self.min_seg_len = min_seg_len\n self._resids_list = [site._resids(self) for site in self.sites]\n self._len_body = len(body)\n self._chains = np.array([body.chain(i + 1) for i in range(len(body))])\n self.allowed_pairs = allowed_pairs\n<|end_body_0|>\n\n<|body_start_1|>\n if isite < 0:\n return [None]\n return self._resids_list[isite]\n<|end_body_1|>\n\n<|body_start_2|>\n resid_subset = set()\n for i in range(len(self.sites)):\n resid_subset |= set(self._resids_list[i])\n resid_subset = np.array(list(resid_subset))\n N = self._len_body + 1\n val, idx = np.where(0 == np.arange(N)[np.newaxis, :] - resid_subset[:, np.newaxis])\n to_subset = np.array(N * [-1])\n to_subset[idx] = val\n assert np.all(to_subset[resid_subset] == np.arange(len(resid_subset)))\n return (resid_subset, to_subset)\n<|end_body_2|>\n\n<|body_start_3|>\n if ires < 0 or jres < 0:\n return True\n assert 0 < ires <= self._len_body and 0 < jres <= self._len_body\n ichain, jchain = (self._chains[ires - 1], self._chains[jres - 1])\n if ichain == jchain:\n ipol = self.sites[isite].polarity\n jpol = self.sites[jsite].polarity\n if ipol == jpol:\n return False\n if ipol == 'N':\n seglen = jres - ires + 1\n else:\n seglen = ires - jres + 1\n if seglen < self.min_seg_len:\n return False\n return True\n<|end_body_3|>\n\n<|body_start_4|>\n if isite == jsite:\n return False\n if isite < 0 or jsite < 0:\n return True\n if self.allowed_pairs is not None and (isite, jsite) not in self.allowed_pairs:\n return False\n return True\n<|end_body_4|>\n\n<|body_start_5|>\n sites = str([(s._resids(self), s.polarity) for s in self.sites])\n if len(sites) > 30:\n sites = sites[:30] + '...'\n return 'Spliceable: body=(' + str(self._len_body) + ',' + str(self.body).splitlines()[0].split('/')[-1] + '), sites=' + sites\n<|end_body_5|>\n", "class_docstring": "TODO: Summary Attributes: allowed_pairs (TYPE): Description body (TYPE): Description bodyid (TYPE): Description chains (TYPE): Description end_of_chain (TYPE): Description min_seg_len (TYPE): Description nsite (TYPE): Description sites (TYPE): Description start_of_chain (TYPE): Description", "class_name": "Spliceable", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Spliceable:\n \"\"\"TODO: Summary Attributes: allowed_pairs (TYPE): Description body (TYPE): Description bodyid (TYPE): Description chains (TYPE): Description end_of_chain (TYPE): Description min_seg_len (TYPE): Description nsite (TYPE): Description sites (TYPE): Description start_of_chain (TYPE): Description\"\"\"\n\n def __init__(self, body, sites, *, bodyid=None, min_seg_len=1, allowed_pairs=None):\n \"\"\"TODO: Summary Args: body (TYPE): Description sites (TYPE): Description bodyid (None, optional): Description min_seg_len (int, optional): Description allowed_pairs (None, optional): Description Raises: ValueError: Description\"\"\"\n <|body_0|>\n\n def resids(self, isite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_1|>\n\n def spliceable_positions(self):\n \"\"\"selection of resids, and map 'global' index to selected index Returns: TYPE: Description\"\"\"\n <|body_2|>\n\n def is_compatible(self, isite, ires, jsite, jres):\n \"\"\"TODO: Summary Args: isite (TYPE): Description ires (TYPE): Description jsite (TYPE): Description jres (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_3|>\n\n def sitepair_allowed(self, isite, jsite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description jsite (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_4|>\n\n def __repr__(self):\n \"\"\"TODO: Summary Returns: TYPE: Description\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.body = body\n chains = list(body.split_by_chain())\n self.start_of_chain = {i + 1: sum((len(c) for c in chains[:i])) for i in range(len(chains))}\n self.end_of_chain = {i + 1: sum((len(c) for c in chains[:i + 1])) for i in range(len(chains))}\n self.start_of_chain[None] = 0\n self.chains = {i + 1: c for i, c in enumerate(chains)}\n self.bodyid = bodyid\n if callable(sites):\n sites = sites(body)\n if isinstance(sites, SpliceSite):\n sites = [sites]\n self.sites = list(sites)\n for i, site in enumerate(self.sites):\n if isinstance(site, str):\n raise ValueError('site currently must be (sele, polarity)')\n if not isinstance(site, SpliceSite):\n if isinstance(site, dict):\n self.sites[i] = SpliceSite(**site)\n else:\n if not isinstance(site, Iterable):\n self.sites[i] = (site,)\n self.sites[i] = SpliceSite(*site)\n self.nsite = dict(N=0, C=0)\n for s in self.sites:\n self.nsite[s.polarity] += 1\n self.min_seg_len = min_seg_len\n self._resids_list = [site._resids(self) for site in self.sites]\n self._len_body = len(body)\n self._chains = np.array([body.chain(i + 1) for i in range(len(body))])\n self.allowed_pairs = allowed_pairs\n<|end_body_0|>\n\n<|body_start_1|>\n if isite < 0:\n return [None]\n return self._resids_list[isite]\n<|end_body_1|>\n\n<|body_start_2|>\n resid_subset = set()\n for i in range(len(self.sites)):\n resid_subset |= set(self._resids_list[i])\n resid_subset = np.array(list(resid_subset))\n N = self._len_body + 1\n val, idx = np.where(0 == np.arange(N)[np.newaxis, :] - resid_subset[:, np.newaxis])\n to_subset = np.array(N * [-1])\n to_subset[idx] = val\n assert np.all(to_subset[resid_subset] == np.arange(len(resid_subset)))\n return (resid_subset, to_subset)\n<|end_body_2|>\n\n<|body_start_3|>\n if ires < 0 or jres < 0:\n return True\n assert 0 < ires <= self._len_body and 0 < jres <= self._len_body\n ichain, jchain = (self._chains[ires - 1], self._chains[jres - 1])\n if ichain == jchain:\n ipol = self.sites[isite].polarity\n jpol = self.sites[jsite].polarity\n if ipol == jpol:\n return False\n if ipol == 'N':\n seglen = jres - ires + 1\n else:\n seglen = ires - jres + 1\n if seglen < self.min_seg_len:\n return False\n return True\n<|end_body_3|>\n\n<|body_start_4|>\n if isite == jsite:\n return False\n if isite < 0 or jsite < 0:\n return True\n if self.allowed_pairs is not None and (isite, jsite) not in self.allowed_pairs:\n return False\n return True\n<|end_body_4|>\n\n<|body_start_5|>\n sites = str([(s._resids(self), s.polarity) for s in self.sites])\n if len(sites) > 30:\n sites = sites[:30] + '...'\n return 'Spliceable: body=(' + str(self._len_body) + ',' + str(self.body).splitlines()[0].split('/')[-1] + '), sites=' + sites\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000090", "length_bytes": 32245, "license_type": "permissive", "methods": [{"docstring": "TODO: Summary Args: body (TYPE): Description sites (TYPE): Description bodyid (None, optional): Description min_seg_len (int, optional): Description allowed_pairs (None, optional): Description Raises: ValueError: Description", "name": "__init__", "signature": "def __init__(self, body, sites, *, bodyid=None, min_seg_len=1, allowed_pairs=None)"}, {"docstring": "TODO: Summary Args: isite (TYPE): Description Returns: TYPE: Description", "name": "resids", "signature": "def resids(self, isite)"}, {"docstring": "selection of resids, and map 'global' index to selected index Returns: TYPE: Description", "name": "spliceable_positions", "signature": "def spliceable_positions(self)"}, {"docstring": "TODO: Summary Args: isite (TYPE): Description ires (TYPE): Description jsite (TYPE): Description jres (TYPE): Description Returns: TYPE: Description", "name": "is_compatible", "signature": "def is_compatible(self, isite, ires, jsite, jres)"}, {"docstring": "TODO: Summary Args: isite (TYPE): Description jsite (TYPE): Description Returns: TYPE: Description", "name": "sitepair_allowed", "signature": "def sitepair_allowed(self, isite, jsite)"}, {"docstring": "TODO: Summary Returns: TYPE: Description", "name": "__repr__", "signature": "def __repr__(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_015118", "prompt": "Implement the Python class `Spliceable` described below.\n\nClass description:\nTODO: Summary Attributes: allowed_pairs (TYPE): Description body (TYPE): Description bodyid (TYPE): Description chains (TYPE): Description end_of_chain (TYPE): Description min_seg_len (TYPE): Description nsite (TYPE): Description sites (TYPE): Description start_of_chain (TYPE): Description\n\nMethod signatures and docstrings:\n- def __init__(self, body, sites, *, bodyid=None, min_seg_len=1, allowed_pairs=None): TODO: Summary Args: body (TYPE): Description sites (TYPE): Description bodyid (None, optional): Description min_seg_len (int, optional): Description allowed_pairs (None, optional): Description Raises: ValueError: Description\n- def resids(self, isite): TODO: Summary Args: isite (TYPE): Description Returns: TYPE: Description\n- def spliceable_positions(self): selection of resids, and map 'global' index to selected index Returns: TYPE: Description\n- def is_compatible(self, isite, ires, jsite, jres): TODO: Summary Args: isite (TYPE): Description ires (TYPE): Description jsite (TYPE): Description jres (TYPE): Description Returns: TYPE: Description\n- def sitepair_allowed(self, isite, jsite): TODO: Summary Args: isite (TYPE): Description jsite (TYPE): Description Returns: TYPE: Description\n- def __repr__(self): TODO: Summary Returns: TYPE: Description", "prompted_full_text": "Implement the Python class `Spliceable` described below.\n\nClass description:\nTODO: Summary Attributes: allowed_pairs (TYPE): Description body (TYPE): Description bodyid (TYPE): Description chains (TYPE): Description end_of_chain (TYPE): Description min_seg_len (TYPE): Description nsite (TYPE): Description sites (TYPE): Description start_of_chain (TYPE): Description\n\nMethod signatures and docstrings:\n- def __init__(self, body, sites, *, bodyid=None, min_seg_len=1, allowed_pairs=None): TODO: Summary Args: body (TYPE): Description sites (TYPE): Description bodyid (None, optional): Description min_seg_len (int, optional): Description allowed_pairs (None, optional): Description Raises: ValueError: Description\n- def resids(self, isite): TODO: Summary Args: isite (TYPE): Description Returns: TYPE: Description\n- def spliceable_positions(self): selection of resids, and map 'global' index to selected index Returns: TYPE: Description\n- def is_compatible(self, isite, ires, jsite, jres): TODO: Summary Args: isite (TYPE): Description ires (TYPE): Description jsite (TYPE): Description jres (TYPE): Description Returns: TYPE: Description\n- def sitepair_allowed(self, isite, jsite): TODO: Summary Args: isite (TYPE): Description jsite (TYPE): Description Returns: TYPE: Description\n- def __repr__(self): TODO: Summary Returns: TYPE: Description\n\n<|skeleton|>\nclass Spliceable:\n \"\"\"TODO: Summary Attributes: allowed_pairs (TYPE): Description body (TYPE): Description bodyid (TYPE): Description chains (TYPE): Description end_of_chain (TYPE): Description min_seg_len (TYPE): Description nsite (TYPE): Description sites (TYPE): Description start_of_chain (TYPE): Description\"\"\"\n\n def __init__(self, body, sites, *, bodyid=None, min_seg_len=1, allowed_pairs=None):\n \"\"\"TODO: Summary Args: body (TYPE): Description sites (TYPE): Description bodyid (None, optional): Description min_seg_len (int, optional): Description allowed_pairs (None, optional): Description Raises: ValueError: Description\"\"\"\n <|body_0|>\n\n def resids(self, isite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_1|>\n\n def spliceable_positions(self):\n \"\"\"selection of resids, and map 'global' index to selected index Returns: TYPE: Description\"\"\"\n <|body_2|>\n\n def is_compatible(self, isite, ires, jsite, jres):\n \"\"\"TODO: Summary Args: isite (TYPE): Description ires (TYPE): Description jsite (TYPE): Description jres (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_3|>\n\n def sitepair_allowed(self, isite, jsite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description jsite (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_4|>\n\n def __repr__(self):\n \"\"\"TODO: Summary Returns: TYPE: Description\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.body = body\n chains = list(body.split_by_chain())\n self.start_of_chain = {i + 1: sum((len(c) for c in chains[:i])) for i in range(len(chains))}\n self.end_of_chain = {i + 1: sum((len(c) for c in chains[:i + 1])) for i in range(len(chains))}\n self.start_of_chain[None] = 0\n self.chains = {i + 1: c for i, c in enumerate(chains)}\n self.bodyid = bodyid\n if callable(sites):\n sites = sites(body)\n if isinstance(sites, SpliceSite):\n sites = [sites]\n self.sites = list(sites)\n for i, site in enumerate(self.sites):\n if isinstance(site, str):\n raise ValueError('site currently must be (sele, polarity)')\n if not isinstance(site, SpliceSite):\n if isinstance(site, dict):\n self.sites[i] = SpliceSite(**site)\n else:\n if not isinstance(site, Iterable):\n self.sites[i] = (site,)\n self.sites[i] = SpliceSite(*site)\n self.nsite = dict(N=0, C=0)\n for s in self.sites:\n self.nsite[s.polarity] += 1\n self.min_seg_len = min_seg_len\n self._resids_list = [site._resids(self) for site in self.sites]\n self._len_body = len(body)\n self._chains = np.array([body.chain(i + 1) for i in range(len(body))])\n self.allowed_pairs = allowed_pairs\n<|end_body_0|>\n\n<|body_start_1|>\n if isite < 0:\n return [None]\n return self._resids_list[isite]\n<|end_body_1|>\n\n<|body_start_2|>\n resid_subset = set()\n for i in range(len(self.sites)):\n resid_subset |= set(self._resids_list[i])\n resid_subset = np.array(list(resid_subset))\n N = self._len_body + 1\n val, idx = np.where(0 == np.arange(N)[np.newaxis, :] - resid_subset[:, np.newaxis])\n to_subset = np.array(N * [-1])\n to_subset[idx] = val\n assert np.all(to_subset[resid_subset] == np.arange(len(resid_subset)))\n return (resid_subset, to_subset)\n<|end_body_2|>\n\n<|body_start_3|>\n if ires < 0 or jres < 0:\n return True\n assert 0 < ires <= self._len_body and 0 < jres <= self._len_body\n ichain, jchain = (self._chains[ires - 1], self._chains[jres - 1])\n if ichain == jchain:\n ipol = self.sites[isite].polarity\n jpol = self.sites[jsite].polarity\n if ipol == jpol:\n return False\n if ipol == 'N':\n seglen = jres - ires + 1\n else:\n seglen = ires - jres + 1\n if seglen < self.min_seg_len:\n return False\n return True\n<|end_body_3|>\n\n<|body_start_4|>\n if isite == jsite:\n return False\n if isite < 0 or jsite < 0:\n return True\n if self.allowed_pairs is not None and (isite, jsite) not in self.allowed_pairs:\n return False\n return True\n<|end_body_4|>\n\n<|body_start_5|>\n sites = str([(s._resids(self), s.polarity) for s in self.sites])\n if len(sites) > 30:\n sites = sites[:30] + '...'\n return 'Spliceable: body=(' + str(self._len_body) + ',' + str(self.body).splitlines()[0].split('/')[-1] + '), sites=' + sites\n<|end_body_5|>\n", "revision_id": "27993e33a43474d647ecd8277b210d4206858f0b", "skeleton": "<|skeleton|>\nclass Spliceable:\n \"\"\"TODO: Summary Attributes: allowed_pairs (TYPE): Description body (TYPE): Description bodyid (TYPE): Description chains (TYPE): Description end_of_chain (TYPE): Description min_seg_len (TYPE): Description nsite (TYPE): Description sites (TYPE): Description start_of_chain (TYPE): Description\"\"\"\n\n def __init__(self, body, sites, *, bodyid=None, min_seg_len=1, allowed_pairs=None):\n \"\"\"TODO: Summary Args: body (TYPE): Description sites (TYPE): Description bodyid (None, optional): Description min_seg_len (int, optional): Description allowed_pairs (None, optional): Description Raises: ValueError: Description\"\"\"\n <|body_0|>\n\n def resids(self, isite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_1|>\n\n def spliceable_positions(self):\n \"\"\"selection of resids, and map 'global' index to selected index Returns: TYPE: Description\"\"\"\n <|body_2|>\n\n def is_compatible(self, isite, ires, jsite, jres):\n \"\"\"TODO: Summary Args: isite (TYPE): Description ires (TYPE): Description jsite (TYPE): Description jres (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_3|>\n\n def sitepair_allowed(self, isite, jsite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description jsite (TYPE): Description Returns: TYPE: Description\"\"\"\n <|body_4|>\n\n def __repr__(self):\n \"\"\"TODO: Summary Returns: TYPE: Description\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Spliceable:\n \"\"\"TODO: Summary Attributes: allowed_pairs (TYPE): Description body (TYPE): Description bodyid (TYPE): Description chains (TYPE): Description end_of_chain (TYPE): Description min_seg_len (TYPE): Description nsite (TYPE): Description sites (TYPE): Description start_of_chain (TYPE): Description\"\"\"\n\n def __init__(self, body, sites, *, bodyid=None, min_seg_len=1, allowed_pairs=None):\n \"\"\"TODO: Summary Args: body (TYPE): Description sites (TYPE): Description bodyid (None, optional): Description min_seg_len (int, optional): Description allowed_pairs (None, optional): Description Raises: ValueError: Description\"\"\"\n self.body = body\n chains = list(body.split_by_chain())\n self.start_of_chain = {i + 1: sum((len(c) for c in chains[:i])) for i in range(len(chains))}\n self.end_of_chain = {i + 1: sum((len(c) for c in chains[:i + 1])) for i in range(len(chains))}\n self.start_of_chain[None] = 0\n self.chains = {i + 1: c for i, c in enumerate(chains)}\n self.bodyid = bodyid\n if callable(sites):\n sites = sites(body)\n if isinstance(sites, SpliceSite):\n sites = [sites]\n self.sites = list(sites)\n for i, site in enumerate(self.sites):\n if isinstance(site, str):\n raise ValueError('site currently must be (sele, polarity)')\n if not isinstance(site, SpliceSite):\n if isinstance(site, dict):\n self.sites[i] = SpliceSite(**site)\n else:\n if not isinstance(site, Iterable):\n self.sites[i] = (site,)\n self.sites[i] = SpliceSite(*site)\n self.nsite = dict(N=0, C=0)\n for s in self.sites:\n self.nsite[s.polarity] += 1\n self.min_seg_len = min_seg_len\n self._resids_list = [site._resids(self) for site in self.sites]\n self._len_body = len(body)\n self._chains = np.array([body.chain(i + 1) for i in range(len(body))])\n self.allowed_pairs = allowed_pairs\n\n def resids(self, isite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description Returns: TYPE: Description\"\"\"\n if isite < 0:\n return [None]\n return self._resids_list[isite]\n\n def spliceable_positions(self):\n \"\"\"selection of resids, and map 'global' index to selected index Returns: TYPE: Description\"\"\"\n resid_subset = set()\n for i in range(len(self.sites)):\n resid_subset |= set(self._resids_list[i])\n resid_subset = np.array(list(resid_subset))\n N = self._len_body + 1\n val, idx = np.where(0 == np.arange(N)[np.newaxis, :] - resid_subset[:, np.newaxis])\n to_subset = np.array(N * [-1])\n to_subset[idx] = val\n assert np.all(to_subset[resid_subset] == np.arange(len(resid_subset)))\n return (resid_subset, to_subset)\n\n def is_compatible(self, isite, ires, jsite, jres):\n \"\"\"TODO: Summary Args: isite (TYPE): Description ires (TYPE): Description jsite (TYPE): Description jres (TYPE): Description Returns: TYPE: Description\"\"\"\n if ires < 0 or jres < 0:\n return True\n assert 0 < ires <= self._len_body and 0 < jres <= self._len_body\n ichain, jchain = (self._chains[ires - 1], self._chains[jres - 1])\n if ichain == jchain:\n ipol = self.sites[isite].polarity\n jpol = self.sites[jsite].polarity\n if ipol == jpol:\n return False\n if ipol == 'N':\n seglen = jres - ires + 1\n else:\n seglen = ires - jres + 1\n if seglen < self.min_seg_len:\n return False\n return True\n\n def sitepair_allowed(self, isite, jsite):\n \"\"\"TODO: Summary Args: isite (TYPE): Description jsite (TYPE): Description Returns: TYPE: Description\"\"\"\n if isite == jsite:\n return False\n if isite < 0 or jsite < 0:\n return True\n if self.allowed_pairs is not None and (isite, jsite) not in self.allowed_pairs:\n return False\n return True\n\n def __repr__(self):\n \"\"\"TODO: Summary Returns: TYPE: Description\"\"\"\n sites = str([(s._resids(self), s.polarity) for s in self.sites])\n if len(sites) > 30:\n sites = sites[:30] + '...'\n return 'Spliceable: body=(' + str(self._len_body) + ',' + str(self.body).splitlines()[0].split('/')[-1] + '), sites=' + sites\n", "source": "the_stack_v2_python_sparse", "source_path": "worms/segments.py", "source_repo": "willsheffler/worms", "split": "test", "star_events_count": 6} {"blob_id": "1559636107666f7d3c3cccd3788e9d7957d84276", "bodies": ["unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['func_a'])\nelf = parseelf.ParseELF(self.tempdir, 'liba.so', self._ldpaths)\nself.assertTrue('is_lib' in elf)\nself.assertTrue(elf['is_lib'])", "unittest_lib.BuildELF(os.path.join(self.tempdir, 'abc_main'), executable=True)\nelf = parseelf.ParseELF(self.tempdir, 'abc_main', self._ldpaths)\nself.assertTrue('is_lib' in elf)\nself.assertFalse(elf['is_lib'])", "osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), 'foo')\nself.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\nosutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), '\\x7fELF-foo')\nself.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))", "unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\nunittest_lib.BuildELF(os.path.join(self.tempdir, 'libxyz.so'), defined_symbols=['fx', 'fy', 'fz'], undefined_symbols=['fa', 'fb', 'fc'], used_libs=['abc'])\nelf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths, parse_symbols=False)\nself.assertFalse('imp_sym' in elf)\nself.assertFalse('exp_sym' in elf)\nelf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths)\nself.assertTrue('imp_sym' in elf)\nself.assertTrue('exp_sym' in elf)\nself.assertEquals(elf['imp_sym'], set(['fa', 'fb', 'fc']))\nself.assertEquals(set((k for k, (_, _, st_shndx) in elf['exp_sym'].iteritems() if st_shndx == 'SHT_DYNSYM')), set(['fx', 'fy', 'fz']))\nfor sym in ['fx', 'fy', 'fz']:\n self.assertEquals('STB_GLOBAL', elf['exp_sym'][sym][0])", "unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\nunittest_lib.BuildELF(os.path.join(self.tempdir, 'libu.so'), defined_symbols=['fu'], undefined_symbols=['fa'], used_libs=['abc'])\nunittest_lib.BuildELF(os.path.join(self.tempdir, 'libv.so'), defined_symbols=['fv'], undefined_symbols=['fb'], used_libs=['abc'])\nunittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fu', 'fv'], used_libs=['u', 'v'], executable=True)\nelf_prog = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\nself.assertTrue('libu.so' in elf_prog['needed'])\nself.assertTrue('libv.so' in elf_prog['needed'])\nself.assertFalse('libabc.so' in elf_prog['needed'])", "unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['fa'])\nunittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fa'], used_libs=['a'], executable=True)\nelf = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\nfor lib in elf['libs'].values():\n for path in ('realpath', 'path'):\n if lib[path] is None:\n continue\n self.assertFalse(lib[path].startswith('/'))\n self.assertFalse(lib[path].startswith(self.tempdir))\n self.assertTrue(lib[path] == elf['interp'] or os.path.exists(os.path.join(self.tempdir, lib[path])))"], "bodies_text": "<|body_start_0|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['func_a'])\n elf = parseelf.ParseELF(self.tempdir, 'liba.so', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertTrue(elf['is_lib'])\n<|end_body_0|>\n\n<|body_start_1|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'abc_main'), executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'abc_main', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertFalse(elf['is_lib'])\n<|end_body_1|>\n\n<|body_start_2|>\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), 'foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), '\\x7fELF-foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n<|end_body_2|>\n\n<|body_start_3|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libxyz.so'), defined_symbols=['fx', 'fy', 'fz'], undefined_symbols=['fa', 'fb', 'fc'], used_libs=['abc'])\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths, parse_symbols=False)\n self.assertFalse('imp_sym' in elf)\n self.assertFalse('exp_sym' in elf)\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths)\n self.assertTrue('imp_sym' in elf)\n self.assertTrue('exp_sym' in elf)\n self.assertEquals(elf['imp_sym'], set(['fa', 'fb', 'fc']))\n self.assertEquals(set((k for k, (_, _, st_shndx) in elf['exp_sym'].iteritems() if st_shndx == 'SHT_DYNSYM')), set(['fx', 'fy', 'fz']))\n for sym in ['fx', 'fy', 'fz']:\n self.assertEquals('STB_GLOBAL', elf['exp_sym'][sym][0])\n<|end_body_3|>\n\n<|body_start_4|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libu.so'), defined_symbols=['fu'], undefined_symbols=['fa'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libv.so'), defined_symbols=['fv'], undefined_symbols=['fb'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fu', 'fv'], used_libs=['u', 'v'], executable=True)\n elf_prog = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n self.assertTrue('libu.so' in elf_prog['needed'])\n self.assertTrue('libv.so' in elf_prog['needed'])\n self.assertFalse('libabc.so' in elf_prog['needed'])\n<|end_body_4|>\n\n<|body_start_5|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['fa'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fa'], used_libs=['a'], executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n for lib in elf['libs'].values():\n for path in ('realpath', 'path'):\n if lib[path] is None:\n continue\n self.assertFalse(lib[path].startswith('/'))\n self.assertFalse(lib[path].startswith(self.tempdir))\n self.assertTrue(lib[path] == elf['interp'] or os.path.exists(os.path.join(self.tempdir, lib[path])))\n<|end_body_5|>\n", "class_docstring": "Test the ELF parsing functions.", "class_name": "ELFParsingTest", "detected_licenses": ["LGPL-2.0-or-later", "GPL-1.0-or-later", "MIT", "Apache-2.0", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ELFParsingTest:\n \"\"\"Test the ELF parsing functions.\"\"\"\n\n def testIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for libs.\"\"\"\n <|body_0|>\n\n def testNotIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for executables.\"\"\"\n <|body_1|>\n\n def testUnsupportedFiles(self):\n \"\"\"Tests unsupported files are ignored.\"\"\"\n <|body_2|>\n\n def testParsedSymbols(self):\n \"\"\"Tests the list of imported/exported symbols.\"\"\"\n <|body_3|>\n\n def testLibDependencies(self):\n \"\"\"Tests the list direct dependencies.\"\"\"\n <|body_4|>\n\n def testRelativeLibPaths(self):\n \"\"\"Test that the paths reported by ParseELF are relative to root.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['func_a'])\n elf = parseelf.ParseELF(self.tempdir, 'liba.so', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertTrue(elf['is_lib'])\n<|end_body_0|>\n\n<|body_start_1|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'abc_main'), executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'abc_main', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertFalse(elf['is_lib'])\n<|end_body_1|>\n\n<|body_start_2|>\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), 'foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), '\\x7fELF-foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n<|end_body_2|>\n\n<|body_start_3|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libxyz.so'), defined_symbols=['fx', 'fy', 'fz'], undefined_symbols=['fa', 'fb', 'fc'], used_libs=['abc'])\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths, parse_symbols=False)\n self.assertFalse('imp_sym' in elf)\n self.assertFalse('exp_sym' in elf)\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths)\n self.assertTrue('imp_sym' in elf)\n self.assertTrue('exp_sym' in elf)\n self.assertEquals(elf['imp_sym'], set(['fa', 'fb', 'fc']))\n self.assertEquals(set((k for k, (_, _, st_shndx) in elf['exp_sym'].iteritems() if st_shndx == 'SHT_DYNSYM')), set(['fx', 'fy', 'fz']))\n for sym in ['fx', 'fy', 'fz']:\n self.assertEquals('STB_GLOBAL', elf['exp_sym'][sym][0])\n<|end_body_3|>\n\n<|body_start_4|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libu.so'), defined_symbols=['fu'], undefined_symbols=['fa'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libv.so'), defined_symbols=['fv'], undefined_symbols=['fb'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fu', 'fv'], used_libs=['u', 'v'], executable=True)\n elf_prog = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n self.assertTrue('libu.so' in elf_prog['needed'])\n self.assertTrue('libv.so' in elf_prog['needed'])\n self.assertFalse('libabc.so' in elf_prog['needed'])\n<|end_body_4|>\n\n<|body_start_5|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['fa'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fa'], used_libs=['a'], executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n for lib in elf['libs'].values():\n for path in ('realpath', 'path'):\n if lib[path] is None:\n continue\n self.assertFalse(lib[path].startswith('/'))\n self.assertFalse(lib[path].startswith(self.tempdir))\n self.assertTrue(lib[path] == elf['interp'] or os.path.exists(os.path.join(self.tempdir, lib[path])))\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000091", "length_bytes": 4985, "license_type": "permissive", "methods": [{"docstring": "Tests the 'is_lib' attribute is inferred correctly for libs.", "name": "testIsLib", "signature": "def testIsLib(self)"}, {"docstring": "Tests the 'is_lib' attribute is inferred correctly for executables.", "name": "testNotIsLib", "signature": "def testNotIsLib(self)"}, {"docstring": "Tests unsupported files are ignored.", "name": "testUnsupportedFiles", "signature": "def testUnsupportedFiles(self)"}, {"docstring": "Tests the list of imported/exported symbols.", "name": "testParsedSymbols", "signature": "def testParsedSymbols(self)"}, {"docstring": "Tests the list direct dependencies.", "name": "testLibDependencies", "signature": "def testLibDependencies(self)"}, {"docstring": "Test that the paths reported by ParseELF are relative to root.", "name": "testRelativeLibPaths", "signature": "def testRelativeLibPaths(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_003617", "prompt": "Implement the Python class `ELFParsingTest` described below.\n\nClass description:\nTest the ELF parsing functions.\n\nMethod signatures and docstrings:\n- def testIsLib(self): Tests the 'is_lib' attribute is inferred correctly for libs.\n- def testNotIsLib(self): Tests the 'is_lib' attribute is inferred correctly for executables.\n- def testUnsupportedFiles(self): Tests unsupported files are ignored.\n- def testParsedSymbols(self): Tests the list of imported/exported symbols.\n- def testLibDependencies(self): Tests the list direct dependencies.\n- def testRelativeLibPaths(self): Test that the paths reported by ParseELF are relative to root.", "prompted_full_text": "Implement the Python class `ELFParsingTest` described below.\n\nClass description:\nTest the ELF parsing functions.\n\nMethod signatures and docstrings:\n- def testIsLib(self): Tests the 'is_lib' attribute is inferred correctly for libs.\n- def testNotIsLib(self): Tests the 'is_lib' attribute is inferred correctly for executables.\n- def testUnsupportedFiles(self): Tests unsupported files are ignored.\n- def testParsedSymbols(self): Tests the list of imported/exported symbols.\n- def testLibDependencies(self): Tests the list direct dependencies.\n- def testRelativeLibPaths(self): Test that the paths reported by ParseELF are relative to root.\n\n<|skeleton|>\nclass ELFParsingTest:\n \"\"\"Test the ELF parsing functions.\"\"\"\n\n def testIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for libs.\"\"\"\n <|body_0|>\n\n def testNotIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for executables.\"\"\"\n <|body_1|>\n\n def testUnsupportedFiles(self):\n \"\"\"Tests unsupported files are ignored.\"\"\"\n <|body_2|>\n\n def testParsedSymbols(self):\n \"\"\"Tests the list of imported/exported symbols.\"\"\"\n <|body_3|>\n\n def testLibDependencies(self):\n \"\"\"Tests the list direct dependencies.\"\"\"\n <|body_4|>\n\n def testRelativeLibPaths(self):\n \"\"\"Test that the paths reported by ParseELF are relative to root.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['func_a'])\n elf = parseelf.ParseELF(self.tempdir, 'liba.so', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertTrue(elf['is_lib'])\n<|end_body_0|>\n\n<|body_start_1|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'abc_main'), executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'abc_main', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertFalse(elf['is_lib'])\n<|end_body_1|>\n\n<|body_start_2|>\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), 'foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), '\\x7fELF-foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n<|end_body_2|>\n\n<|body_start_3|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libxyz.so'), defined_symbols=['fx', 'fy', 'fz'], undefined_symbols=['fa', 'fb', 'fc'], used_libs=['abc'])\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths, parse_symbols=False)\n self.assertFalse('imp_sym' in elf)\n self.assertFalse('exp_sym' in elf)\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths)\n self.assertTrue('imp_sym' in elf)\n self.assertTrue('exp_sym' in elf)\n self.assertEquals(elf['imp_sym'], set(['fa', 'fb', 'fc']))\n self.assertEquals(set((k for k, (_, _, st_shndx) in elf['exp_sym'].iteritems() if st_shndx == 'SHT_DYNSYM')), set(['fx', 'fy', 'fz']))\n for sym in ['fx', 'fy', 'fz']:\n self.assertEquals('STB_GLOBAL', elf['exp_sym'][sym][0])\n<|end_body_3|>\n\n<|body_start_4|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libu.so'), defined_symbols=['fu'], undefined_symbols=['fa'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libv.so'), defined_symbols=['fv'], undefined_symbols=['fb'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fu', 'fv'], used_libs=['u', 'v'], executable=True)\n elf_prog = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n self.assertTrue('libu.so' in elf_prog['needed'])\n self.assertTrue('libv.so' in elf_prog['needed'])\n self.assertFalse('libabc.so' in elf_prog['needed'])\n<|end_body_4|>\n\n<|body_start_5|>\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['fa'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fa'], used_libs=['a'], executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n for lib in elf['libs'].values():\n for path in ('realpath', 'path'):\n if lib[path] is None:\n continue\n self.assertFalse(lib[path].startswith('/'))\n self.assertFalse(lib[path].startswith(self.tempdir))\n self.assertTrue(lib[path] == elf['interp'] or os.path.exists(os.path.join(self.tempdir, lib[path])))\n<|end_body_5|>\n", "revision_id": "72a05af97787001756bae2511b7985e61498c965", "skeleton": "<|skeleton|>\nclass ELFParsingTest:\n \"\"\"Test the ELF parsing functions.\"\"\"\n\n def testIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for libs.\"\"\"\n <|body_0|>\n\n def testNotIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for executables.\"\"\"\n <|body_1|>\n\n def testUnsupportedFiles(self):\n \"\"\"Tests unsupported files are ignored.\"\"\"\n <|body_2|>\n\n def testParsedSymbols(self):\n \"\"\"Tests the list of imported/exported symbols.\"\"\"\n <|body_3|>\n\n def testLibDependencies(self):\n \"\"\"Tests the list direct dependencies.\"\"\"\n <|body_4|>\n\n def testRelativeLibPaths(self):\n \"\"\"Test that the paths reported by ParseELF are relative to root.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ELFParsingTest:\n \"\"\"Test the ELF parsing functions.\"\"\"\n\n def testIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for libs.\"\"\"\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['func_a'])\n elf = parseelf.ParseELF(self.tempdir, 'liba.so', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertTrue(elf['is_lib'])\n\n def testNotIsLib(self):\n \"\"\"Tests the 'is_lib' attribute is inferred correctly for executables.\"\"\"\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'abc_main'), executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'abc_main', self._ldpaths)\n self.assertTrue('is_lib' in elf)\n self.assertFalse(elf['is_lib'])\n\n def testUnsupportedFiles(self):\n \"\"\"Tests unsupported files are ignored.\"\"\"\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), 'foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n osutils.WriteFile(os.path.join(self.tempdir, 'foo.so'), '\\x7fELF-foo')\n self.assertEquals(None, parseelf.ParseELF(self.tempdir, 'foo.so', self._ldpaths))\n\n def testParsedSymbols(self):\n \"\"\"Tests the list of imported/exported symbols.\"\"\"\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libxyz.so'), defined_symbols=['fx', 'fy', 'fz'], undefined_symbols=['fa', 'fb', 'fc'], used_libs=['abc'])\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths, parse_symbols=False)\n self.assertFalse('imp_sym' in elf)\n self.assertFalse('exp_sym' in elf)\n elf = parseelf.ParseELF(self.tempdir, 'libxyz.so', self._ldpaths)\n self.assertTrue('imp_sym' in elf)\n self.assertTrue('exp_sym' in elf)\n self.assertEquals(elf['imp_sym'], set(['fa', 'fb', 'fc']))\n self.assertEquals(set((k for k, (_, _, st_shndx) in elf['exp_sym'].iteritems() if st_shndx == 'SHT_DYNSYM')), set(['fx', 'fy', 'fz']))\n for sym in ['fx', 'fy', 'fz']:\n self.assertEquals('STB_GLOBAL', elf['exp_sym'][sym][0])\n\n def testLibDependencies(self):\n \"\"\"Tests the list direct dependencies.\"\"\"\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libabc.so'), defined_symbols=['fa', 'fb', 'fc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libu.so'), defined_symbols=['fu'], undefined_symbols=['fa'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'libv.so'), defined_symbols=['fv'], undefined_symbols=['fb'], used_libs=['abc'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fu', 'fv'], used_libs=['u', 'v'], executable=True)\n elf_prog = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n self.assertTrue('libu.so' in elf_prog['needed'])\n self.assertTrue('libv.so' in elf_prog['needed'])\n self.assertFalse('libabc.so' in elf_prog['needed'])\n\n def testRelativeLibPaths(self):\n \"\"\"Test that the paths reported by ParseELF are relative to root.\"\"\"\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'liba.so'), ['fa'])\n unittest_lib.BuildELF(os.path.join(self.tempdir, 'prog'), undefined_symbols=['fa'], used_libs=['a'], executable=True)\n elf = parseelf.ParseELF(self.tempdir, 'prog', self._ldpaths)\n for lib in elf['libs'].values():\n for path in ('realpath', 'path'):\n if lib[path] is None:\n continue\n self.assertFalse(lib[path].startswith('/'))\n self.assertFalse(lib[path].startswith(self.tempdir))\n self.assertTrue(lib[path] == elf['interp'] or os.path.exists(os.path.join(self.tempdir, lib[path])))\n", "source": "the_stack_v2_python_sparse", "source_path": "third_party/chromite/lib/parseelf_unittest.py", "source_repo": "metux/chromium-suckless", "split": "test", "star_events_count": 5} {"blob_id": "c81e772c94b36600d0f712b93667c044826d5abf", "bodies": ["logging.info('Initializing dwu fp16 optimizer')\nself.since_last_invalid = 0\nself.loss_scale = loss_scale\nself.dls_downscale = dls_downscale\nself.dls_upscale = dls_upscale\nself.dls_upscale_interval = dls_upscale_interval\nself.world_size = utils.get_world_size()\nself.fp16_model = fp16_model", "scaling_factor = self.loss_scale * self.world_size\noptimizer.set_global_scale(scaling_factor)\nloss *= self.loss_scale\nloss.backward()\noptimizer.complete_reductions()\nif not update:\n torch.cuda.synchronize()\n return\nnorm = optimizer.L2_grad_norm / scaling_factor\nshould_update = math.isfinite(norm)\nif should_update:\n if scheduler is not None:\n scheduler.step()\n optimizer.step(skip_overflow_check=True)\nif should_update:\n self.since_last_invalid += 1\nelse:\n self.loss_scale /= self.dls_downscale\n self.since_last_invalid = 0\n logging.info(f'Gradient norm: {norm}')\n logging.info(f'Skipped batch, new scale: {self.loss_scale}')\nif self.since_last_invalid >= self.dls_upscale_interval:\n self.loss_scale *= self.dls_upscale\n self.loss_scale = min(self.loss_scale, 8192.0)\n logging.info(f'Upscaling, new scale: {self.loss_scale}')\n self.since_last_invalid = 0\nfor p in self.fp16_model.parameters():\n p.grad = None"], "bodies_text": "<|body_start_0|>\n logging.info('Initializing dwu fp16 optimizer')\n self.since_last_invalid = 0\n self.loss_scale = loss_scale\n self.dls_downscale = dls_downscale\n self.dls_upscale = dls_upscale\n self.dls_upscale_interval = dls_upscale_interval\n self.world_size = utils.get_world_size()\n self.fp16_model = fp16_model\n<|end_body_0|>\n\n<|body_start_1|>\n scaling_factor = self.loss_scale * self.world_size\n optimizer.set_global_scale(scaling_factor)\n loss *= self.loss_scale\n loss.backward()\n optimizer.complete_reductions()\n if not update:\n torch.cuda.synchronize()\n return\n norm = optimizer.L2_grad_norm / scaling_factor\n should_update = math.isfinite(norm)\n if should_update:\n if scheduler is not None:\n scheduler.step()\n optimizer.step(skip_overflow_check=True)\n if should_update:\n self.since_last_invalid += 1\n else:\n self.loss_scale /= self.dls_downscale\n self.since_last_invalid = 0\n logging.info(f'Gradient norm: {norm}')\n logging.info(f'Skipped batch, new scale: {self.loss_scale}')\n if self.since_last_invalid >= self.dls_upscale_interval:\n self.loss_scale *= self.dls_upscale\n self.loss_scale = min(self.loss_scale, 8192.0)\n logging.info(f'Upscaling, new scale: {self.loss_scale}')\n self.since_last_invalid = 0\n for p in self.fp16_model.parameters():\n p.grad = None\n<|end_body_1|>\n", "class_docstring": "Distributed weight update mixed precision optimizer with dynamic loss scaling and backoff. https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor", "class_name": "DwuFp16Optimizer", "detected_licenses": ["Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DwuFp16Optimizer:\n \"\"\"Distributed weight update mixed precision optimizer with dynamic loss scaling and backoff. https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\"\"\"\n\n def __init__(self, fp16_model, loss_scale=1024, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128):\n \"\"\"Constructor for the DwuFp16Optimizer. :param fp16_model: model (previously casted to half) :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling\"\"\"\n <|body_0|>\n\n def step(self, loss, optimizer, scheduler, update=True):\n \"\"\"Performs one step of the optimizer. Applies loss scaling, computes gradients in fp16, converts gradients to fp32, inverts scaling and applies optional gradient norm clipping. If gradients are finite, it applies update to fp32 master weights and copies updated parameters to fp16 model for the next iteration. If gradients are not finite, it skips the batch and adjusts scaling factor for the next iteration. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logging.info('Initializing dwu fp16 optimizer')\n self.since_last_invalid = 0\n self.loss_scale = loss_scale\n self.dls_downscale = dls_downscale\n self.dls_upscale = dls_upscale\n self.dls_upscale_interval = dls_upscale_interval\n self.world_size = utils.get_world_size()\n self.fp16_model = fp16_model\n<|end_body_0|>\n\n<|body_start_1|>\n scaling_factor = self.loss_scale * self.world_size\n optimizer.set_global_scale(scaling_factor)\n loss *= self.loss_scale\n loss.backward()\n optimizer.complete_reductions()\n if not update:\n torch.cuda.synchronize()\n return\n norm = optimizer.L2_grad_norm / scaling_factor\n should_update = math.isfinite(norm)\n if should_update:\n if scheduler is not None:\n scheduler.step()\n optimizer.step(skip_overflow_check=True)\n if should_update:\n self.since_last_invalid += 1\n else:\n self.loss_scale /= self.dls_downscale\n self.since_last_invalid = 0\n logging.info(f'Gradient norm: {norm}')\n logging.info(f'Skipped batch, new scale: {self.loss_scale}')\n if self.since_last_invalid >= self.dls_upscale_interval:\n self.loss_scale *= self.dls_upscale\n self.loss_scale = min(self.loss_scale, 8192.0)\n logging.info(f'Upscaling, new scale: {self.loss_scale}')\n self.since_last_invalid = 0\n for p in self.fp16_model.parameters():\n p.grad = None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000092", "length_bytes": 12553, "license_type": "permissive", "methods": [{"docstring": "Constructor for the DwuFp16Optimizer. :param fp16_model: model (previously casted to half) :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling", "name": "__init__", "signature": "def __init__(self, fp16_model, loss_scale=1024, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128)"}, {"docstring": "Performs one step of the optimizer. Applies loss scaling, computes gradients in fp16, converts gradients to fp32, inverts scaling and applies optional gradient norm clipping. If gradients are finite, it applies update to fp32 master weights and copies updated parameters to fp16 model for the next iteration. If gradients are not finite, it skips the batch and adjusts scaling factor for the next iteration. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update", "name": "step", "signature": "def step(self, loss, optimizer, scheduler, update=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043404", "prompt": "Implement the Python class `DwuFp16Optimizer` described below.\n\nClass description:\nDistributed weight update mixed precision optimizer with dynamic loss scaling and backoff. https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\n\nMethod signatures and docstrings:\n- def __init__(self, fp16_model, loss_scale=1024, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128): Constructor for the DwuFp16Optimizer. :param fp16_model: model (previously casted to half) :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling\n- def step(self, loss, optimizer, scheduler, update=True): Performs one step of the optimizer. Applies loss scaling, computes gradients in fp16, converts gradients to fp32, inverts scaling and applies optional gradient norm clipping. If gradients are finite, it applies update to fp32 master weights and copies updated parameters to fp16 model for the next iteration. If gradients are not finite, it skips the batch and adjusts scaling factor for the next iteration. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update", "prompted_full_text": "Implement the Python class `DwuFp16Optimizer` described below.\n\nClass description:\nDistributed weight update mixed precision optimizer with dynamic loss scaling and backoff. https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\n\nMethod signatures and docstrings:\n- def __init__(self, fp16_model, loss_scale=1024, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128): Constructor for the DwuFp16Optimizer. :param fp16_model: model (previously casted to half) :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling\n- def step(self, loss, optimizer, scheduler, update=True): Performs one step of the optimizer. Applies loss scaling, computes gradients in fp16, converts gradients to fp32, inverts scaling and applies optional gradient norm clipping. If gradients are finite, it applies update to fp32 master weights and copies updated parameters to fp16 model for the next iteration. If gradients are not finite, it skips the batch and adjusts scaling factor for the next iteration. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update\n\n<|skeleton|>\nclass DwuFp16Optimizer:\n \"\"\"Distributed weight update mixed precision optimizer with dynamic loss scaling and backoff. https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\"\"\"\n\n def __init__(self, fp16_model, loss_scale=1024, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128):\n \"\"\"Constructor for the DwuFp16Optimizer. :param fp16_model: model (previously casted to half) :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling\"\"\"\n <|body_0|>\n\n def step(self, loss, optimizer, scheduler, update=True):\n \"\"\"Performs one step of the optimizer. Applies loss scaling, computes gradients in fp16, converts gradients to fp32, inverts scaling and applies optional gradient norm clipping. If gradients are finite, it applies update to fp32 master weights and copies updated parameters to fp16 model for the next iteration. If gradients are not finite, it skips the batch and adjusts scaling factor for the next iteration. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logging.info('Initializing dwu fp16 optimizer')\n self.since_last_invalid = 0\n self.loss_scale = loss_scale\n self.dls_downscale = dls_downscale\n self.dls_upscale = dls_upscale\n self.dls_upscale_interval = dls_upscale_interval\n self.world_size = utils.get_world_size()\n self.fp16_model = fp16_model\n<|end_body_0|>\n\n<|body_start_1|>\n scaling_factor = self.loss_scale * self.world_size\n optimizer.set_global_scale(scaling_factor)\n loss *= self.loss_scale\n loss.backward()\n optimizer.complete_reductions()\n if not update:\n torch.cuda.synchronize()\n return\n norm = optimizer.L2_grad_norm / scaling_factor\n should_update = math.isfinite(norm)\n if should_update:\n if scheduler is not None:\n scheduler.step()\n optimizer.step(skip_overflow_check=True)\n if should_update:\n self.since_last_invalid += 1\n else:\n self.loss_scale /= self.dls_downscale\n self.since_last_invalid = 0\n logging.info(f'Gradient norm: {norm}')\n logging.info(f'Skipped batch, new scale: {self.loss_scale}')\n if self.since_last_invalid >= self.dls_upscale_interval:\n self.loss_scale *= self.dls_upscale\n self.loss_scale = min(self.loss_scale, 8192.0)\n logging.info(f'Upscaling, new scale: {self.loss_scale}')\n self.since_last_invalid = 0\n for p in self.fp16_model.parameters():\n p.grad = None\n<|end_body_1|>\n", "revision_id": "e017c9359f66e2d814c6990d1ffa56654a73f5b0", "skeleton": "<|skeleton|>\nclass DwuFp16Optimizer:\n \"\"\"Distributed weight update mixed precision optimizer with dynamic loss scaling and backoff. https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\"\"\"\n\n def __init__(self, fp16_model, loss_scale=1024, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128):\n \"\"\"Constructor for the DwuFp16Optimizer. :param fp16_model: model (previously casted to half) :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling\"\"\"\n <|body_0|>\n\n def step(self, loss, optimizer, scheduler, update=True):\n \"\"\"Performs one step of the optimizer. Applies loss scaling, computes gradients in fp16, converts gradients to fp32, inverts scaling and applies optional gradient norm clipping. If gradients are finite, it applies update to fp32 master weights and copies updated parameters to fp16 model for the next iteration. If gradients are not finite, it skips the batch and adjusts scaling factor for the next iteration. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DwuFp16Optimizer:\n \"\"\"Distributed weight update mixed precision optimizer with dynamic loss scaling and backoff. https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html#scalefactor\"\"\"\n\n def __init__(self, fp16_model, loss_scale=1024, dls_downscale=2, dls_upscale=2, dls_upscale_interval=128):\n \"\"\"Constructor for the DwuFp16Optimizer. :param fp16_model: model (previously casted to half) :param loss_scale: initial loss scale :param dls_downscale: loss downscale factor, loss scale is divided by this factor when NaN/INF occurs in the gradients :param dls_upscale: loss upscale factor, loss scale is multiplied by this factor if previous dls_upscale_interval batches finished successfully :param dls_upscale_interval: interval for loss scale upscaling\"\"\"\n logging.info('Initializing dwu fp16 optimizer')\n self.since_last_invalid = 0\n self.loss_scale = loss_scale\n self.dls_downscale = dls_downscale\n self.dls_upscale = dls_upscale\n self.dls_upscale_interval = dls_upscale_interval\n self.world_size = utils.get_world_size()\n self.fp16_model = fp16_model\n\n def step(self, loss, optimizer, scheduler, update=True):\n \"\"\"Performs one step of the optimizer. Applies loss scaling, computes gradients in fp16, converts gradients to fp32, inverts scaling and applies optional gradient norm clipping. If gradients are finite, it applies update to fp32 master weights and copies updated parameters to fp16 model for the next iteration. If gradients are not finite, it skips the batch and adjusts scaling factor for the next iteration. :param loss: value of loss function :param optimizer: optimizer :param update: if True executes weight update\"\"\"\n scaling_factor = self.loss_scale * self.world_size\n optimizer.set_global_scale(scaling_factor)\n loss *= self.loss_scale\n loss.backward()\n optimizer.complete_reductions()\n if not update:\n torch.cuda.synchronize()\n return\n norm = optimizer.L2_grad_norm / scaling_factor\n should_update = math.isfinite(norm)\n if should_update:\n if scheduler is not None:\n scheduler.step()\n optimizer.step(skip_overflow_check=True)\n if should_update:\n self.since_last_invalid += 1\n else:\n self.loss_scale /= self.dls_downscale\n self.since_last_invalid = 0\n logging.info(f'Gradient norm: {norm}')\n logging.info(f'Skipped batch, new scale: {self.loss_scale}')\n if self.since_last_invalid >= self.dls_upscale_interval:\n self.loss_scale *= self.dls_upscale\n self.loss_scale = min(self.loss_scale, 8192.0)\n logging.info(f'Upscaling, new scale: {self.loss_scale}')\n self.since_last_invalid = 0\n for p in self.fp16_model.parameters():\n p.grad = None\n", "source": "the_stack_v2_python_sparse", "source_path": "Inspur/benchmarks/gnmt/implementations/implementation_closed/seq2seq/train/fp_optimizers.py", "source_repo": "piyushghai/training_results_v0.7", "split": "test", "star_events_count": 0} {"blob_id": "dab1a8acd39b25c85be5e682821e9a2a84f4b3e2", "bodies": ["super().__init__(**kwargs)\nself.custom_loss_function = custom_loss_function\nself.start_train = start_train\nself.end_train = end_train", "dwp = create_list_period(self.start_train, self.end_train, self.is_weekly_forecast)\ndtp = get_all_datehorizons(dwp, self.forecasting_horizons, self.is_weekly_forecast)\nLogger.info('Scope data used to train model (dates_to_predict)', '[%d, %d]' % (min(dtp), max(dtp)), self.__class__.__name__)\nif self.is_sell_in_model:\n feature_creator = FeatureEngineeringSellIn(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\nelse:\n feature_creator = FeatureEngineeringSellOut(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\ntrain = feature_creator.build_train(data, dwp, dtp)\nx_train = train[Config.find_relevant_feature_columns(train)]\ny_train = train[n.FIELD_LABEL]\nreturn (feature_creator, train, x_train, y_train)", "feature_creator, train, x_train, y_train = self.build_train_set(data)\nmodel = ModelFactory(regressor=regressor, parameters=parameters, feature_creator=feature_creator, use_light_regressor=self.use_light_regressor, custom_loss_function=self.custom_loss_function)\nmodel.train_model(x_train, y_train)\nreturn (model, train)"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self.custom_loss_function = custom_loss_function\n self.start_train = start_train\n self.end_train = end_train\n<|end_body_0|>\n\n<|body_start_1|>\n dwp = create_list_period(self.start_train, self.end_train, self.is_weekly_forecast)\n dtp = get_all_datehorizons(dwp, self.forecasting_horizons, self.is_weekly_forecast)\n Logger.info('Scope data used to train model (dates_to_predict)', '[%d, %d]' % (min(dtp), max(dtp)), self.__class__.__name__)\n if self.is_sell_in_model:\n feature_creator = FeatureEngineeringSellIn(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n else:\n feature_creator = FeatureEngineeringSellOut(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n train = feature_creator.build_train(data, dwp, dtp)\n x_train = train[Config.find_relevant_feature_columns(train)]\n y_train = train[n.FIELD_LABEL]\n return (feature_creator, train, x_train, y_train)\n<|end_body_1|>\n\n<|body_start_2|>\n feature_creator, train, x_train, y_train = self.build_train_set(data)\n model = ModelFactory(regressor=regressor, parameters=parameters, feature_creator=feature_creator, use_light_regressor=self.use_light_regressor, custom_loss_function=self.custom_loss_function)\n model.train_model(x_train, y_train)\n return (model, train)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Trainer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Trainer:\n\n def __init__(self, start_train, end_train, custom_loss_function=None, **kwargs):\n \"\"\"Model trainer object. Args: custom_loss_function(function or None): If not None, we use this loss function to train the model.\"\"\"\n <|body_0|>\n\n def build_train_set(self, data: DataLoader):\n \"\"\"Build the training set, the labels Args: data (DataLoader): data object containing all relevant tables Returns: train (pd.DataFrame): Train set including target labels x_train : train set in numpy array y_train: labels\"\"\"\n <|body_1|>\n\n def fit(self, data: DataLoader, regressor=None, parameters=None):\n \"\"\"Build the training set, the labels and train th model Args: data (DataLoader): data object containing all relevant tables Returns: model (ModelFactory): Trained model train (pd.DataFrame): Train set including target labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.custom_loss_function = custom_loss_function\n self.start_train = start_train\n self.end_train = end_train\n<|end_body_0|>\n\n<|body_start_1|>\n dwp = create_list_period(self.start_train, self.end_train, self.is_weekly_forecast)\n dtp = get_all_datehorizons(dwp, self.forecasting_horizons, self.is_weekly_forecast)\n Logger.info('Scope data used to train model (dates_to_predict)', '[%d, %d]' % (min(dtp), max(dtp)), self.__class__.__name__)\n if self.is_sell_in_model:\n feature_creator = FeatureEngineeringSellIn(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n else:\n feature_creator = FeatureEngineeringSellOut(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n train = feature_creator.build_train(data, dwp, dtp)\n x_train = train[Config.find_relevant_feature_columns(train)]\n y_train = train[n.FIELD_LABEL]\n return (feature_creator, train, x_train, y_train)\n<|end_body_1|>\n\n<|body_start_2|>\n feature_creator, train, x_train, y_train = self.build_train_set(data)\n model = ModelFactory(regressor=regressor, parameters=parameters, feature_creator=feature_creator, use_light_regressor=self.use_light_regressor, custom_loss_function=self.custom_loss_function)\n model.train_model(x_train, y_train)\n return (model, train)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000093", "length_bytes": 10846, "license_type": "no_license", "methods": [{"docstring": "Model trainer object. Args: custom_loss_function(function or None): If not None, we use this loss function to train the model.", "name": "__init__", "signature": "def __init__(self, start_train, end_train, custom_loss_function=None, **kwargs)"}, {"docstring": "Build the training set, the labels Args: data (DataLoader): data object containing all relevant tables Returns: train (pd.DataFrame): Train set including target labels x_train : train set in numpy array y_train: labels", "name": "build_train_set", "signature": "def build_train_set(self, data: DataLoader)"}, {"docstring": "Build the training set, the labels and train th model Args: data (DataLoader): data object containing all relevant tables Returns: model (ModelFactory): Trained model train (pd.DataFrame): Train set including target labels", "name": "fit", "signature": "def fit(self, data: DataLoader, regressor=None, parameters=None)"}], "n_methods": 3, "prompt": "Implement the Python class `Trainer` described below.\n\nClass description:\nImplement the Trainer class.\n\nMethod signatures and docstrings:\n- def __init__(self, start_train, end_train, custom_loss_function=None, **kwargs): Model trainer object. Args: custom_loss_function(function or None): If not None, we use this loss function to train the model.\n- def build_train_set(self, data: DataLoader): Build the training set, the labels Args: data (DataLoader): data object containing all relevant tables Returns: train (pd.DataFrame): Train set including target labels x_train : train set in numpy array y_train: labels\n- def fit(self, data: DataLoader, regressor=None, parameters=None): Build the training set, the labels and train th model Args: data (DataLoader): data object containing all relevant tables Returns: model (ModelFactory): Trained model train (pd.DataFrame): Train set including target labels", "prompted_full_text": "Implement the Python class `Trainer` described below.\n\nClass description:\nImplement the Trainer class.\n\nMethod signatures and docstrings:\n- def __init__(self, start_train, end_train, custom_loss_function=None, **kwargs): Model trainer object. Args: custom_loss_function(function or None): If not None, we use this loss function to train the model.\n- def build_train_set(self, data: DataLoader): Build the training set, the labels Args: data (DataLoader): data object containing all relevant tables Returns: train (pd.DataFrame): Train set including target labels x_train : train set in numpy array y_train: labels\n- def fit(self, data: DataLoader, regressor=None, parameters=None): Build the training set, the labels and train th model Args: data (DataLoader): data object containing all relevant tables Returns: model (ModelFactory): Trained model train (pd.DataFrame): Train set including target labels\n\n<|skeleton|>\nclass Trainer:\n\n def __init__(self, start_train, end_train, custom_loss_function=None, **kwargs):\n \"\"\"Model trainer object. Args: custom_loss_function(function or None): If not None, we use this loss function to train the model.\"\"\"\n <|body_0|>\n\n def build_train_set(self, data: DataLoader):\n \"\"\"Build the training set, the labels Args: data (DataLoader): data object containing all relevant tables Returns: train (pd.DataFrame): Train set including target labels x_train : train set in numpy array y_train: labels\"\"\"\n <|body_1|>\n\n def fit(self, data: DataLoader, regressor=None, parameters=None):\n \"\"\"Build the training set, the labels and train th model Args: data (DataLoader): data object containing all relevant tables Returns: model (ModelFactory): Trained model train (pd.DataFrame): Train set including target labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.custom_loss_function = custom_loss_function\n self.start_train = start_train\n self.end_train = end_train\n<|end_body_0|>\n\n<|body_start_1|>\n dwp = create_list_period(self.start_train, self.end_train, self.is_weekly_forecast)\n dtp = get_all_datehorizons(dwp, self.forecasting_horizons, self.is_weekly_forecast)\n Logger.info('Scope data used to train model (dates_to_predict)', '[%d, %d]' % (min(dtp), max(dtp)), self.__class__.__name__)\n if self.is_sell_in_model:\n feature_creator = FeatureEngineeringSellIn(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n else:\n feature_creator = FeatureEngineeringSellOut(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n train = feature_creator.build_train(data, dwp, dtp)\n x_train = train[Config.find_relevant_feature_columns(train)]\n y_train = train[n.FIELD_LABEL]\n return (feature_creator, train, x_train, y_train)\n<|end_body_1|>\n\n<|body_start_2|>\n feature_creator, train, x_train, y_train = self.build_train_set(data)\n model = ModelFactory(regressor=regressor, parameters=parameters, feature_creator=feature_creator, use_light_regressor=self.use_light_regressor, custom_loss_function=self.custom_loss_function)\n model.train_model(x_train, y_train)\n return (model, train)\n<|end_body_2|>\n", "revision_id": "5aed9ef58f9618687839ab98c84800dcb57b77d6", "skeleton": "<|skeleton|>\nclass Trainer:\n\n def __init__(self, start_train, end_train, custom_loss_function=None, **kwargs):\n \"\"\"Model trainer object. Args: custom_loss_function(function or None): If not None, we use this loss function to train the model.\"\"\"\n <|body_0|>\n\n def build_train_set(self, data: DataLoader):\n \"\"\"Build the training set, the labels Args: data (DataLoader): data object containing all relevant tables Returns: train (pd.DataFrame): Train set including target labels x_train : train set in numpy array y_train: labels\"\"\"\n <|body_1|>\n\n def fit(self, data: DataLoader, regressor=None, parameters=None):\n \"\"\"Build the training set, the labels and train th model Args: data (DataLoader): data object containing all relevant tables Returns: model (ModelFactory): Trained model train (pd.DataFrame): Train set including target labels\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Trainer:\n def __init__(self, start_train, end_train, custom_loss_function=None, **kwargs):\n \"\"\"Model trainer object. Args: custom_loss_function(function or None): If not None, we use this loss function to train the model.\"\"\"\n super().__init__(**kwargs)\n self.custom_loss_function = custom_loss_function\n self.start_train = start_train\n self.end_train = end_train\n\n def build_train_set(self, data: DataLoader):\n \"\"\"Build the training set, the labels Args: data (DataLoader): data object containing all relevant tables Returns: train (pd.DataFrame): Train set including target labels x_train : train set in numpy array y_train: labels\"\"\"\n dwp = create_list_period(self.start_train, self.end_train, self.is_weekly_forecast)\n dtp = get_all_datehorizons(dwp, self.forecasting_horizons, self.is_weekly_forecast)\n Logger.info('Scope data used to train model (dates_to_predict)', '[%d, %d]' % (min(dtp), max(dtp)), self.__class__.__name__)\n if self.is_sell_in_model:\n feature_creator = FeatureEngineeringSellIn(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n else:\n feature_creator = FeatureEngineeringSellOut(is_weekly_forecast=self.is_weekly_forecast, granularity=self.granularity, feature_controller=self.feature_controller)\n train = feature_creator.build_train(data, dwp, dtp)\n x_train = train[Config.find_relevant_feature_columns(train)]\n y_train = train[n.FIELD_LABEL]\n return (feature_creator, train, x_train, y_train)\n\n def fit(self, data: DataLoader, regressor=None, parameters=None):\n \"\"\"Build the training set, the labels and train th model Args: data (DataLoader): data object containing all relevant tables Returns: model (ModelFactory): Trained model train (pd.DataFrame): Train set including target labels\"\"\"\n feature_creator, train, x_train, y_train = self.build_train_set(data)\n model = ModelFactory(regressor=regressor, parameters=parameters, feature_creator=feature_creator, use_light_regressor=self.use_light_regressor, custom_loss_function=self.custom_loss_function)\n model.train_model(x_train, y_train)\n return (model, train)\n", "source": "the_stack_v2_python_sparse", "source_path": "dcd_forecast_model/trainer.py", "source_repo": "MuditaLi/demanding-forecasting", "split": "test", "star_events_count": 0} {"blob_id": "0ae94af4a2142d3fe8203c0c5fa7d6a54b0fa15b", "bodies": ["super().__init__()\nself.flair_enabled = config['flair_enabled']\nself.label_vocab_size = label_vocab_size\nself.start_tag_index = label_vocab_size\nself.stop_tag_index = label_vocab_size + 1\nif self.flair_enabled:\n flair_cache = None\n if config['flair_cache_enabled']:\n flair_cache = FlairEmbeddingCache()\n flair_cache.load_cache(config['flair_cache_path'], config['flair_cutoff_dim'])\n self.flair_embedding = FlairEmbeddings(config['language'], flair_cache)\n total_embedding_dim = self.flair_embedding.embedding_dim\n self.flair_with_char_cnn = config['flair_with_char_cnn']\n if self.flair_with_char_cnn:\n self.char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n total_embedding_dim += config['char_num_kernels']\nelse:\n char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n self.token_embedding = TokenEmbedding(pretrained_embedding, char_cnn)\n total_embedding_dim = config['glove_dim'] + config['char_num_kernels']\nself.bi_rnn = BiRNN(total_embedding_dim, config['rnn_hidden_dim'], config['rnn_num_layers'], config['dropout'], config['rnn_type'], config['embed_dropout'], config['word_dropout'], config['locked_dropout'])\nself.emission = UnaryFactor(2 * config['rnn_hidden_dim'], label_vocab_size + 2, config['decoder_hidden_dim'], config['dropout'])\nself.transitions = torch.nn.Parameter(torch.randn(label_vocab_size + 2, label_vocab_size + 2))\nself.transitions.detach()[self.start_tag_index, :] = -10000\nself.transitions.detach()[:, self.stop_tag_index] = -10000", "if self.flair_enabled:\n embeddings = self.flair_embedding(strings)\n if self.flair_with_char_cnn:\n char_embeddings = self.char_cnn(token_chars)\n char_embeddings = char_embeddings.view(embeddings.shape[0], embeddings.shape[1], char_embeddings.shape[1])\n embeddings = torch.cat([embeddings, char_embeddings], dim=2)\nelse:\n embeddings = self.token_embedding(tokens, token_chars)\nrnn_output = self.bi_rnn(embeddings, lengths)\nreturn (self.emission(rnn_output), self.transitions)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.flair_enabled = config['flair_enabled']\n self.label_vocab_size = label_vocab_size\n self.start_tag_index = label_vocab_size\n self.stop_tag_index = label_vocab_size + 1\n if self.flair_enabled:\n flair_cache = None\n if config['flair_cache_enabled']:\n flair_cache = FlairEmbeddingCache()\n flair_cache.load_cache(config['flair_cache_path'], config['flair_cutoff_dim'])\n self.flair_embedding = FlairEmbeddings(config['language'], flair_cache)\n total_embedding_dim = self.flair_embedding.embedding_dim\n self.flair_with_char_cnn = config['flair_with_char_cnn']\n if self.flair_with_char_cnn:\n self.char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n total_embedding_dim += config['char_num_kernels']\n else:\n char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n self.token_embedding = TokenEmbedding(pretrained_embedding, char_cnn)\n total_embedding_dim = config['glove_dim'] + config['char_num_kernels']\n self.bi_rnn = BiRNN(total_embedding_dim, config['rnn_hidden_dim'], config['rnn_num_layers'], config['dropout'], config['rnn_type'], config['embed_dropout'], config['word_dropout'], config['locked_dropout'])\n self.emission = UnaryFactor(2 * config['rnn_hidden_dim'], label_vocab_size + 2, config['decoder_hidden_dim'], config['dropout'])\n self.transitions = torch.nn.Parameter(torch.randn(label_vocab_size + 2, label_vocab_size + 2))\n self.transitions.detach()[self.start_tag_index, :] = -10000\n self.transitions.detach()[:, self.stop_tag_index] = -10000\n<|end_body_0|>\n\n<|body_start_1|>\n if self.flair_enabled:\n embeddings = self.flair_embedding(strings)\n if self.flair_with_char_cnn:\n char_embeddings = self.char_cnn(token_chars)\n char_embeddings = char_embeddings.view(embeddings.shape[0], embeddings.shape[1], char_embeddings.shape[1])\n embeddings = torch.cat([embeddings, char_embeddings], dim=2)\n else:\n embeddings = self.token_embedding(tokens, token_chars)\n rnn_output = self.bi_rnn(embeddings, lengths)\n return (self.emission(rnn_output), self.transitions)\n<|end_body_1|>\n", "class_docstring": "Linear Chain CRF Model.", "class_name": "LinearChainCRFModel", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LinearChainCRFModel:\n \"\"\"Linear Chain CRF Model.\"\"\"\n\n def __init__(self, config: dict, pretrained_embedding: Tensor, char_vocab_size: int, label_vocab_size: int):\n \"\"\"Assemble the factor model according to the config. Args: config: model configs and hyperparameters. pretrained_embedding: token embedding. char_vocab_size: size of the character vocabulary. label_vocab_size: size of the label space, i.e. output dim.\"\"\"\n <|body_0|>\n\n def forward(self, tokens: Tensor, token_chars: Tensor, lengths: Tensor, strings: List[str]) -> Tensor:\n \"\"\"Args: tokens: the token indices. [batch_size, max_num_tokens] token_chars: the character indices for each token. Shape [batch_size * max_num_tokens, max_num_chars] lengths: length of each sentence in batch. shape [batch_size, ] strings: str sentences, len(strings) == batch_size Returns: emission_scores: [batch_size, max_num_tokens, num_labels] transition_scores\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.flair_enabled = config['flair_enabled']\n self.label_vocab_size = label_vocab_size\n self.start_tag_index = label_vocab_size\n self.stop_tag_index = label_vocab_size + 1\n if self.flair_enabled:\n flair_cache = None\n if config['flair_cache_enabled']:\n flair_cache = FlairEmbeddingCache()\n flair_cache.load_cache(config['flair_cache_path'], config['flair_cutoff_dim'])\n self.flair_embedding = FlairEmbeddings(config['language'], flair_cache)\n total_embedding_dim = self.flair_embedding.embedding_dim\n self.flair_with_char_cnn = config['flair_with_char_cnn']\n if self.flair_with_char_cnn:\n self.char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n total_embedding_dim += config['char_num_kernels']\n else:\n char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n self.token_embedding = TokenEmbedding(pretrained_embedding, char_cnn)\n total_embedding_dim = config['glove_dim'] + config['char_num_kernels']\n self.bi_rnn = BiRNN(total_embedding_dim, config['rnn_hidden_dim'], config['rnn_num_layers'], config['dropout'], config['rnn_type'], config['embed_dropout'], config['word_dropout'], config['locked_dropout'])\n self.emission = UnaryFactor(2 * config['rnn_hidden_dim'], label_vocab_size + 2, config['decoder_hidden_dim'], config['dropout'])\n self.transitions = torch.nn.Parameter(torch.randn(label_vocab_size + 2, label_vocab_size + 2))\n self.transitions.detach()[self.start_tag_index, :] = -10000\n self.transitions.detach()[:, self.stop_tag_index] = -10000\n<|end_body_0|>\n\n<|body_start_1|>\n if self.flair_enabled:\n embeddings = self.flair_embedding(strings)\n if self.flair_with_char_cnn:\n char_embeddings = self.char_cnn(token_chars)\n char_embeddings = char_embeddings.view(embeddings.shape[0], embeddings.shape[1], char_embeddings.shape[1])\n embeddings = torch.cat([embeddings, char_embeddings], dim=2)\n else:\n embeddings = self.token_embedding(tokens, token_chars)\n rnn_output = self.bi_rnn(embeddings, lengths)\n return (self.emission(rnn_output), self.transitions)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000094", "length_bytes": 12805, "license_type": "permissive", "methods": [{"docstring": "Assemble the factor model according to the config. Args: config: model configs and hyperparameters. pretrained_embedding: token embedding. char_vocab_size: size of the character vocabulary. label_vocab_size: size of the label space, i.e. output dim.", "name": "__init__", "signature": "def __init__(self, config: dict, pretrained_embedding: Tensor, char_vocab_size: int, label_vocab_size: int)"}, {"docstring": "Args: tokens: the token indices. [batch_size, max_num_tokens] token_chars: the character indices for each token. Shape [batch_size * max_num_tokens, max_num_chars] lengths: length of each sentence in batch. shape [batch_size, ] strings: str sentences, len(strings) == batch_size Returns: emission_scores: [batch_size, max_num_tokens, num_labels] transition_scores", "name": "forward", "signature": "def forward(self, tokens: Tensor, token_chars: Tensor, lengths: Tensor, strings: List[str]) -> Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052386", "prompt": "Implement the Python class `LinearChainCRFModel` described below.\n\nClass description:\nLinear Chain CRF Model.\n\nMethod signatures and docstrings:\n- def __init__(self, config: dict, pretrained_embedding: Tensor, char_vocab_size: int, label_vocab_size: int): Assemble the factor model according to the config. Args: config: model configs and hyperparameters. pretrained_embedding: token embedding. char_vocab_size: size of the character vocabulary. label_vocab_size: size of the label space, i.e. output dim.\n- def forward(self, tokens: Tensor, token_chars: Tensor, lengths: Tensor, strings: List[str]) -> Tensor: Args: tokens: the token indices. [batch_size, max_num_tokens] token_chars: the character indices for each token. Shape [batch_size * max_num_tokens, max_num_chars] lengths: length of each sentence in batch. shape [batch_size, ] strings: str sentences, len(strings) == batch_size Returns: emission_scores: [batch_size, max_num_tokens, num_labels] transition_scores", "prompted_full_text": "Implement the Python class `LinearChainCRFModel` described below.\n\nClass description:\nLinear Chain CRF Model.\n\nMethod signatures and docstrings:\n- def __init__(self, config: dict, pretrained_embedding: Tensor, char_vocab_size: int, label_vocab_size: int): Assemble the factor model according to the config. Args: config: model configs and hyperparameters. pretrained_embedding: token embedding. char_vocab_size: size of the character vocabulary. label_vocab_size: size of the label space, i.e. output dim.\n- def forward(self, tokens: Tensor, token_chars: Tensor, lengths: Tensor, strings: List[str]) -> Tensor: Args: tokens: the token indices. [batch_size, max_num_tokens] token_chars: the character indices for each token. Shape [batch_size * max_num_tokens, max_num_chars] lengths: length of each sentence in batch. shape [batch_size, ] strings: str sentences, len(strings) == batch_size Returns: emission_scores: [batch_size, max_num_tokens, num_labels] transition_scores\n\n<|skeleton|>\nclass LinearChainCRFModel:\n \"\"\"Linear Chain CRF Model.\"\"\"\n\n def __init__(self, config: dict, pretrained_embedding: Tensor, char_vocab_size: int, label_vocab_size: int):\n \"\"\"Assemble the factor model according to the config. Args: config: model configs and hyperparameters. pretrained_embedding: token embedding. char_vocab_size: size of the character vocabulary. label_vocab_size: size of the label space, i.e. output dim.\"\"\"\n <|body_0|>\n\n def forward(self, tokens: Tensor, token_chars: Tensor, lengths: Tensor, strings: List[str]) -> Tensor:\n \"\"\"Args: tokens: the token indices. [batch_size, max_num_tokens] token_chars: the character indices for each token. Shape [batch_size * max_num_tokens, max_num_chars] lengths: length of each sentence in batch. shape [batch_size, ] strings: str sentences, len(strings) == batch_size Returns: emission_scores: [batch_size, max_num_tokens, num_labels] transition_scores\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.flair_enabled = config['flair_enabled']\n self.label_vocab_size = label_vocab_size\n self.start_tag_index = label_vocab_size\n self.stop_tag_index = label_vocab_size + 1\n if self.flair_enabled:\n flair_cache = None\n if config['flair_cache_enabled']:\n flair_cache = FlairEmbeddingCache()\n flair_cache.load_cache(config['flair_cache_path'], config['flair_cutoff_dim'])\n self.flair_embedding = FlairEmbeddings(config['language'], flair_cache)\n total_embedding_dim = self.flair_embedding.embedding_dim\n self.flair_with_char_cnn = config['flair_with_char_cnn']\n if self.flair_with_char_cnn:\n self.char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n total_embedding_dim += config['char_num_kernels']\n else:\n char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n self.token_embedding = TokenEmbedding(pretrained_embedding, char_cnn)\n total_embedding_dim = config['glove_dim'] + config['char_num_kernels']\n self.bi_rnn = BiRNN(total_embedding_dim, config['rnn_hidden_dim'], config['rnn_num_layers'], config['dropout'], config['rnn_type'], config['embed_dropout'], config['word_dropout'], config['locked_dropout'])\n self.emission = UnaryFactor(2 * config['rnn_hidden_dim'], label_vocab_size + 2, config['decoder_hidden_dim'], config['dropout'])\n self.transitions = torch.nn.Parameter(torch.randn(label_vocab_size + 2, label_vocab_size + 2))\n self.transitions.detach()[self.start_tag_index, :] = -10000\n self.transitions.detach()[:, self.stop_tag_index] = -10000\n<|end_body_0|>\n\n<|body_start_1|>\n if self.flair_enabled:\n embeddings = self.flair_embedding(strings)\n if self.flair_with_char_cnn:\n char_embeddings = self.char_cnn(token_chars)\n char_embeddings = char_embeddings.view(embeddings.shape[0], embeddings.shape[1], char_embeddings.shape[1])\n embeddings = torch.cat([embeddings, char_embeddings], dim=2)\n else:\n embeddings = self.token_embedding(tokens, token_chars)\n rnn_output = self.bi_rnn(embeddings, lengths)\n return (self.emission(rnn_output), self.transitions)\n<|end_body_1|>\n", "revision_id": "8b4a7a40cc34bff608f19d3f7eb64bda76669c5b", "skeleton": "<|skeleton|>\nclass LinearChainCRFModel:\n \"\"\"Linear Chain CRF Model.\"\"\"\n\n def __init__(self, config: dict, pretrained_embedding: Tensor, char_vocab_size: int, label_vocab_size: int):\n \"\"\"Assemble the factor model according to the config. Args: config: model configs and hyperparameters. pretrained_embedding: token embedding. char_vocab_size: size of the character vocabulary. label_vocab_size: size of the label space, i.e. output dim.\"\"\"\n <|body_0|>\n\n def forward(self, tokens: Tensor, token_chars: Tensor, lengths: Tensor, strings: List[str]) -> Tensor:\n \"\"\"Args: tokens: the token indices. [batch_size, max_num_tokens] token_chars: the character indices for each token. Shape [batch_size * max_num_tokens, max_num_chars] lengths: length of each sentence in batch. shape [batch_size, ] strings: str sentences, len(strings) == batch_size Returns: emission_scores: [batch_size, max_num_tokens, num_labels] transition_scores\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LinearChainCRFModel:\n \"\"\"Linear Chain CRF Model.\"\"\"\n\n def __init__(self, config: dict, pretrained_embedding: Tensor, char_vocab_size: int, label_vocab_size: int):\n \"\"\"Assemble the factor model according to the config. Args: config: model configs and hyperparameters. pretrained_embedding: token embedding. char_vocab_size: size of the character vocabulary. label_vocab_size: size of the label space, i.e. output dim.\"\"\"\n super().__init__()\n self.flair_enabled = config['flair_enabled']\n self.label_vocab_size = label_vocab_size\n self.start_tag_index = label_vocab_size\n self.stop_tag_index = label_vocab_size + 1\n if self.flair_enabled:\n flair_cache = None\n if config['flair_cache_enabled']:\n flair_cache = FlairEmbeddingCache()\n flair_cache.load_cache(config['flair_cache_path'], config['flair_cutoff_dim'])\n self.flair_embedding = FlairEmbeddings(config['language'], flair_cache)\n total_embedding_dim = self.flair_embedding.embedding_dim\n self.flair_with_char_cnn = config['flair_with_char_cnn']\n if self.flair_with_char_cnn:\n self.char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n total_embedding_dim += config['char_num_kernels']\n else:\n char_cnn = CharCNN(char_vocab_size, config['char_embed_dim'], config['char_kernel_size'], config['char_num_kernels'], config['dropout'])\n self.token_embedding = TokenEmbedding(pretrained_embedding, char_cnn)\n total_embedding_dim = config['glove_dim'] + config['char_num_kernels']\n self.bi_rnn = BiRNN(total_embedding_dim, config['rnn_hidden_dim'], config['rnn_num_layers'], config['dropout'], config['rnn_type'], config['embed_dropout'], config['word_dropout'], config['locked_dropout'])\n self.emission = UnaryFactor(2 * config['rnn_hidden_dim'], label_vocab_size + 2, config['decoder_hidden_dim'], config['dropout'])\n self.transitions = torch.nn.Parameter(torch.randn(label_vocab_size + 2, label_vocab_size + 2))\n self.transitions.detach()[self.start_tag_index, :] = -10000\n self.transitions.detach()[:, self.stop_tag_index] = -10000\n\n def forward(self, tokens: Tensor, token_chars: Tensor, lengths: Tensor, strings: List[str]) -> Tensor:\n \"\"\"Args: tokens: the token indices. [batch_size, max_num_tokens] token_chars: the character indices for each token. Shape [batch_size * max_num_tokens, max_num_chars] lengths: length of each sentence in batch. shape [batch_size, ] strings: str sentences, len(strings) == batch_size Returns: emission_scores: [batch_size, max_num_tokens, num_labels] transition_scores\"\"\"\n if self.flair_enabled:\n embeddings = self.flair_embedding(strings)\n if self.flair_with_char_cnn:\n char_embeddings = self.char_cnn(token_chars)\n char_embeddings = char_embeddings.view(embeddings.shape[0], embeddings.shape[1], char_embeddings.shape[1])\n embeddings = torch.cat([embeddings, char_embeddings], dim=2)\n else:\n embeddings = self.token_embedding(tokens, token_chars)\n rnn_output = self.bi_rnn(embeddings, lengths)\n return (self.emission(rnn_output), self.transitions)\n", "source": "the_stack_v2_python_sparse", "source_path": "nsr/trainer/linear_chain_trainer.py", "source_repo": "GaoSida/Neural-SampleRank", "split": "test", "star_events_count": 3} {"blob_id": "000052d6b773593c1a7284b6c5a6e06309d5faf2", "bodies": ["import termios\nimport tty\noldtty = termios.tcgetattr(sys.stdin)\ntry:\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n self.handle_communications()\nfinally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)", "import fcntl\nfd = sys.stdin.fileno()\nfl = fcntl.fcntl(fd, fcntl.F_GETFL)\nfcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\nself.handle_communications()", "while True:\n rl, wl, el = select.select([self.channel, sys.stdin], [], [])\n if self.channel in rl:\n if not self.process_channel(self.channel):\n break\n if sys.stdin in rl:\n if not self.process_stdin(self.channel):\n self.channel.shutdown_write()\n break"], "bodies_text": "<|body_start_0|>\n import termios\n import tty\n oldtty = termios.tcgetattr(sys.stdin)\n try:\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n self.handle_communications()\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)\n<|end_body_0|>\n\n<|body_start_1|>\n import fcntl\n fd = sys.stdin.fileno()\n fl = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n self.handle_communications()\n<|end_body_1|>\n\n<|body_start_2|>\n while True:\n rl, wl, el = select.select([self.channel, sys.stdin], [], [])\n if self.channel in rl:\n if not self.process_channel(self.channel):\n break\n if sys.stdin in rl:\n if not self.process_stdin(self.channel):\n self.channel.shutdown_write()\n break\n<|end_body_2|>\n", "class_docstring": "A platform handler for POSIX-type platforms.", "class_name": "PosixHandler", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PosixHandler:\n \"\"\"A platform handler for POSIX-type platforms.\"\"\"\n\n def shell(self):\n \"\"\"Open a shell.\"\"\"\n <|body_0|>\n\n def transfer(self):\n \"\"\"Transfer data over the channel.\"\"\"\n <|body_1|>\n\n def handle_communications(self):\n \"\"\"Handle any pending data over the channel or stdin.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import termios\n import tty\n oldtty = termios.tcgetattr(sys.stdin)\n try:\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n self.handle_communications()\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)\n<|end_body_0|>\n\n<|body_start_1|>\n import fcntl\n fd = sys.stdin.fileno()\n fl = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n self.handle_communications()\n<|end_body_1|>\n\n<|body_start_2|>\n while True:\n rl, wl, el = select.select([self.channel, sys.stdin], [], [])\n if self.channel in rl:\n if not self.process_channel(self.channel):\n break\n if sys.stdin in rl:\n if not self.process_stdin(self.channel):\n self.channel.shutdown_write()\n break\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000095", "length_bytes": 12220, "license_type": "permissive", "methods": [{"docstring": "Open a shell.", "name": "shell", "signature": "def shell(self)"}, {"docstring": "Transfer data over the channel.", "name": "transfer", "signature": "def transfer(self)"}, {"docstring": "Handle any pending data over the channel or stdin.", "name": "handle_communications", "signature": "def handle_communications(self)"}], "n_methods": 3, "prompt": "Implement the Python class `PosixHandler` described below.\n\nClass description:\nA platform handler for POSIX-type platforms.\n\nMethod signatures and docstrings:\n- def shell(self): Open a shell.\n- def transfer(self): Transfer data over the channel.\n- def handle_communications(self): Handle any pending data over the channel or stdin.", "prompted_full_text": "Implement the Python class `PosixHandler` described below.\n\nClass description:\nA platform handler for POSIX-type platforms.\n\nMethod signatures and docstrings:\n- def shell(self): Open a shell.\n- def transfer(self): Transfer data over the channel.\n- def handle_communications(self): Handle any pending data over the channel or stdin.\n\n<|skeleton|>\nclass PosixHandler:\n \"\"\"A platform handler for POSIX-type platforms.\"\"\"\n\n def shell(self):\n \"\"\"Open a shell.\"\"\"\n <|body_0|>\n\n def transfer(self):\n \"\"\"Transfer data over the channel.\"\"\"\n <|body_1|>\n\n def handle_communications(self):\n \"\"\"Handle any pending data over the channel or stdin.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n import termios\n import tty\n oldtty = termios.tcgetattr(sys.stdin)\n try:\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n self.handle_communications()\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)\n<|end_body_0|>\n\n<|body_start_1|>\n import fcntl\n fd = sys.stdin.fileno()\n fl = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n self.handle_communications()\n<|end_body_1|>\n\n<|body_start_2|>\n while True:\n rl, wl, el = select.select([self.channel, sys.stdin], [], [])\n if self.channel in rl:\n if not self.process_channel(self.channel):\n break\n if sys.stdin in rl:\n if not self.process_stdin(self.channel):\n self.channel.shutdown_write()\n break\n<|end_body_2|>\n", "revision_id": "563c1e8d4dfd860f372281dc0f380a0809f6ae15", "skeleton": "<|skeleton|>\nclass PosixHandler:\n \"\"\"A platform handler for POSIX-type platforms.\"\"\"\n\n def shell(self):\n \"\"\"Open a shell.\"\"\"\n <|body_0|>\n\n def transfer(self):\n \"\"\"Transfer data over the channel.\"\"\"\n <|body_1|>\n\n def handle_communications(self):\n \"\"\"Handle any pending data over the channel or stdin.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PosixHandler:\n \"\"\"A platform handler for POSIX-type platforms.\"\"\"\n\n def shell(self):\n \"\"\"Open a shell.\"\"\"\n import termios\n import tty\n oldtty = termios.tcgetattr(sys.stdin)\n try:\n tty.setraw(sys.stdin.fileno())\n tty.setcbreak(sys.stdin.fileno())\n self.handle_communications()\n finally:\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)\n\n def transfer(self):\n \"\"\"Transfer data over the channel.\"\"\"\n import fcntl\n fd = sys.stdin.fileno()\n fl = fcntl.fcntl(fd, fcntl.F_GETFL)\n fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)\n self.handle_communications()\n\n def handle_communications(self):\n \"\"\"Handle any pending data over the channel or stdin.\"\"\"\n while True:\n rl, wl, el = select.select([self.channel, sys.stdin], [], [])\n if self.channel in rl:\n if not self.process_channel(self.channel):\n break\n if sys.stdin in rl:\n if not self.process_stdin(self.channel):\n self.channel.shutdown_write()\n break\n", "source": "the_stack_v2_python_sparse", "source_path": "reviewboard/cmdline/rbssh.py", "source_repo": "LloydFinch/reviewboard", "split": "test", "star_events_count": 2} {"blob_id": "06a5055e0a72c6a0c7c5a7af85d1e3d45b2a140f", "bodies": ["p1 = a = headA\np2 = b = headB\na_length = 0\nb_length = 0\nwhile a:\n a_length += 1\n a = a.next\nwhile b:\n b_length += 1\n b = b.next\nif a_length > b_length:\n for i in range(a_length - b_length):\n p1 = p1.next\nelse:\n for i in range(b_length - a_length):\n p2 = p2.next\nwhile p1 and p2:\n if p1 == p2:\n return p1\n p1 = p1.next\n p2 = p2.next\nreturn None", "if headA is None or headB is None:\n return None\na = headA\nb = headB\nwhile a != b:\n a = headB if a is None else a.next\n b = headA if b is None else b.next\nreturn a"], "bodies_text": "<|body_start_0|>\n p1 = a = headA\n p2 = b = headB\n a_length = 0\n b_length = 0\n while a:\n a_length += 1\n a = a.next\n while b:\n b_length += 1\n b = b.next\n if a_length > b_length:\n for i in range(a_length - b_length):\n p1 = p1.next\n else:\n for i in range(b_length - a_length):\n p2 = p2.next\n while p1 and p2:\n if p1 == p2:\n return p1\n p1 = p1.next\n p2 = p2.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if headA is None or headB is None:\n return None\n a = headA\n b = headB\n while a != b:\n a = headB if a is None else a.next\n b = headA if b is None else b.next\n return a\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def getIntersectionNode(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def getIntersectionNode1(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n p1 = a = headA\n p2 = b = headB\n a_length = 0\n b_length = 0\n while a:\n a_length += 1\n a = a.next\n while b:\n b_length += 1\n b = b.next\n if a_length > b_length:\n for i in range(a_length - b_length):\n p1 = p1.next\n else:\n for i in range(b_length - a_length):\n p2 = p2.next\n while p1 and p2:\n if p1 == p2:\n return p1\n p1 = p1.next\n p2 = p2.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if headA is None or headB is None:\n return None\n a = headA\n b = headB\n while a != b:\n a = headB if a is None else a.next\n b = headA if b is None else b.next\n return a\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000096", "length_bytes": 2002, "license_type": "no_license", "methods": [{"docstring": ":type head1, head1: ListNode :rtype: ListNode", "name": "getIntersectionNode", "signature": "def getIntersectionNode(self, headA, headB)"}, {"docstring": ":type head1, head1: ListNode :rtype: ListNode", "name": "getIntersectionNode1", "signature": "def getIntersectionNode1(self, headA, headB)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046025", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def getIntersectionNode(self, headA, headB): :type head1, head1: ListNode :rtype: ListNode\n- def getIntersectionNode1(self, headA, headB): :type head1, head1: ListNode :rtype: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def getIntersectionNode(self, headA, headB): :type head1, head1: ListNode :rtype: ListNode\n- def getIntersectionNode1(self, headA, headB): :type head1, head1: ListNode :rtype: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def getIntersectionNode(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def getIntersectionNode1(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n p1 = a = headA\n p2 = b = headB\n a_length = 0\n b_length = 0\n while a:\n a_length += 1\n a = a.next\n while b:\n b_length += 1\n b = b.next\n if a_length > b_length:\n for i in range(a_length - b_length):\n p1 = p1.next\n else:\n for i in range(b_length - a_length):\n p2 = p2.next\n while p1 and p2:\n if p1 == p2:\n return p1\n p1 = p1.next\n p2 = p2.next\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if headA is None or headB is None:\n return None\n a = headA\n b = headB\n while a != b:\n a = headB if a is None else a.next\n b = headA if b is None else b.next\n return a\n<|end_body_1|>\n", "revision_id": "71a02a2c6bc12e86119502c9c4a4b2047b9f3966", "skeleton": "<|skeleton|>\nclass Solution:\n\n def getIntersectionNode(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n <|body_0|>\n\n def getIntersectionNode1(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def getIntersectionNode(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n p1 = a = headA\n p2 = b = headB\n a_length = 0\n b_length = 0\n while a:\n a_length += 1\n a = a.next\n while b:\n b_length += 1\n b = b.next\n if a_length > b_length:\n for i in range(a_length - b_length):\n p1 = p1.next\n else:\n for i in range(b_length - a_length):\n p2 = p2.next\n while p1 and p2:\n if p1 == p2:\n return p1\n p1 = p1.next\n p2 = p2.next\n return None\n\n def getIntersectionNode1(self, headA, headB):\n \"\"\":type head1, head1: ListNode :rtype: ListNode\"\"\"\n if headA is None or headB is None:\n return None\n a = headA\n b = headB\n while a != b:\n a = headB if a is None else a.next\n b = headA if b is None else b.next\n return a\n", "source": "the_stack_v2_python_sparse", "source_path": "Linked List/160. Intersection of Two Linked Lists (easy).py", "source_repo": "xilixjd/leetcode", "split": "test", "star_events_count": 1} {"blob_id": "f09174491a1962c1422dc7657e739981d44df171", "bodies": ["pw = PrintWriter(socket.getOutputStream())\npw.print_('GET /' + URLEncoder.encode(data, 'UTF-8') + ' HTTP/1.0\\r\\n')\npw.print_('Accept: text/delim\\r\\n')\npw.print_('Host: ' + hostField + '\\r\\n')\npw.print_('Sender: GAMESERVER\\r\\n')\npw.print_('Receiver: ' + playerName + '\\r\\n')\npw.print_('\\r\\n')\npw.print_('\\r\\n')\npw.flush()", "pw = PrintWriter(socket.getOutputStream())\npw.print_('POST / HTTP/1.0\\r\\n')\npw.print_('Accept: text/delim\\r\\n')\npw.print_('Host: ' + hostField + '\\r\\n')\npw.print_('Sender: GAMESERVER\\r\\n')\npw.print_('Receiver: ' + playerName + '\\r\\n')\npw.print_('Content-Type: text/acl\\r\\n')\npw.print_('Content-Length: ' + len(data) + '\\r\\n')\npw.print_('\\r\\n')\npw.print_(data)\npw.flush()", "pw = PrintWriter(socket.getOutputStream())\npw.print_('HTTP/1.0 200 OK\\r\\n')\npw.print_('Content-type: text/acl\\r\\n')\npw.print_('Content-length: ' + len(data) + '\\r\\n')\npw.print_('Access-Control-Allow-Origin: *\\r\\n')\npw.print_('Access-Control-Allow-Methods: POST, GET, OPTIONS\\r\\n')\npw.print_('Access-Control-Allow-Headers: Content-Type\\r\\n')\npw.print_('Access-Control-Allow-Age: 86400\\r\\n')\npw.print_('\\r\\n')\npw.print_(data)\npw.flush()"], "bodies_text": "<|body_start_0|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('GET /' + URLEncoder.encode(data, 'UTF-8') + ' HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_('\\r\\n')\n pw.flush()\n<|end_body_0|>\n\n<|body_start_1|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('POST / HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('Content-Type: text/acl\\r\\n')\n pw.print_('Content-Length: ' + len(data) + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n<|end_body_1|>\n\n<|body_start_2|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('HTTP/1.0 200 OK\\r\\n')\n pw.print_('Content-type: text/acl\\r\\n')\n pw.print_('Content-length: ' + len(data) + '\\r\\n')\n pw.print_('Access-Control-Allow-Origin: *\\r\\n')\n pw.print_('Access-Control-Allow-Methods: POST, GET, OPTIONS\\r\\n')\n pw.print_('Access-Control-Allow-Headers: Content-Type\\r\\n')\n pw.print_('Access-Control-Allow-Age: 86400\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n<|end_body_2|>\n", "class_docstring": "generated source for class HttpWriter", "class_name": "HttpWriter", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HttpWriter:\n \"\"\"generated source for class HttpWriter\"\"\"\n\n def writeAsClientGET(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClientGET\"\"\"\n <|body_0|>\n\n def writeAsClient(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClient\"\"\"\n <|body_1|>\n\n def writeAsServer(cls, socket, data):\n \"\"\"generated source for method writeAsServer\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('GET /' + URLEncoder.encode(data, 'UTF-8') + ' HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_('\\r\\n')\n pw.flush()\n<|end_body_0|>\n\n<|body_start_1|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('POST / HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('Content-Type: text/acl\\r\\n')\n pw.print_('Content-Length: ' + len(data) + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n<|end_body_1|>\n\n<|body_start_2|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('HTTP/1.0 200 OK\\r\\n')\n pw.print_('Content-type: text/acl\\r\\n')\n pw.print_('Content-length: ' + len(data) + '\\r\\n')\n pw.print_('Access-Control-Allow-Origin: *\\r\\n')\n pw.print_('Access-Control-Allow-Methods: POST, GET, OPTIONS\\r\\n')\n pw.print_('Access-Control-Allow-Headers: Content-Type\\r\\n')\n pw.print_('Access-Control-Allow-Age: 86400\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000097", "length_bytes": 2068, "license_type": "permissive", "methods": [{"docstring": "generated source for method writeAsClientGET", "name": "writeAsClientGET", "signature": "def writeAsClientGET(cls, socket, hostField, data, playerName)"}, {"docstring": "generated source for method writeAsClient", "name": "writeAsClient", "signature": "def writeAsClient(cls, socket, hostField, data, playerName)"}, {"docstring": "generated source for method writeAsServer", "name": "writeAsServer", "signature": "def writeAsServer(cls, socket, data)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_036585", "prompt": "Implement the Python class `HttpWriter` described below.\n\nClass description:\ngenerated source for class HttpWriter\n\nMethod signatures and docstrings:\n- def writeAsClientGET(cls, socket, hostField, data, playerName): generated source for method writeAsClientGET\n- def writeAsClient(cls, socket, hostField, data, playerName): generated source for method writeAsClient\n- def writeAsServer(cls, socket, data): generated source for method writeAsServer", "prompted_full_text": "Implement the Python class `HttpWriter` described below.\n\nClass description:\ngenerated source for class HttpWriter\n\nMethod signatures and docstrings:\n- def writeAsClientGET(cls, socket, hostField, data, playerName): generated source for method writeAsClientGET\n- def writeAsClient(cls, socket, hostField, data, playerName): generated source for method writeAsClient\n- def writeAsServer(cls, socket, data): generated source for method writeAsServer\n\n<|skeleton|>\nclass HttpWriter:\n \"\"\"generated source for class HttpWriter\"\"\"\n\n def writeAsClientGET(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClientGET\"\"\"\n <|body_0|>\n\n def writeAsClient(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClient\"\"\"\n <|body_1|>\n\n def writeAsServer(cls, socket, data):\n \"\"\"generated source for method writeAsServer\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('GET /' + URLEncoder.encode(data, 'UTF-8') + ' HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_('\\r\\n')\n pw.flush()\n<|end_body_0|>\n\n<|body_start_1|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('POST / HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('Content-Type: text/acl\\r\\n')\n pw.print_('Content-Length: ' + len(data) + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n<|end_body_1|>\n\n<|body_start_2|>\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('HTTP/1.0 200 OK\\r\\n')\n pw.print_('Content-type: text/acl\\r\\n')\n pw.print_('Content-length: ' + len(data) + '\\r\\n')\n pw.print_('Access-Control-Allow-Origin: *\\r\\n')\n pw.print_('Access-Control-Allow-Methods: POST, GET, OPTIONS\\r\\n')\n pw.print_('Access-Control-Allow-Headers: Content-Type\\r\\n')\n pw.print_('Access-Control-Allow-Age: 86400\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n<|end_body_2|>\n", "revision_id": "4e6e6e876c3a4294cd711647051da2d9c1836b60", "skeleton": "<|skeleton|>\nclass HttpWriter:\n \"\"\"generated source for class HttpWriter\"\"\"\n\n def writeAsClientGET(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClientGET\"\"\"\n <|body_0|>\n\n def writeAsClient(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClient\"\"\"\n <|body_1|>\n\n def writeAsServer(cls, socket, data):\n \"\"\"generated source for method writeAsServer\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HttpWriter:\n \"\"\"generated source for class HttpWriter\"\"\"\n\n def writeAsClientGET(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClientGET\"\"\"\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('GET /' + URLEncoder.encode(data, 'UTF-8') + ' HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_('\\r\\n')\n pw.flush()\n\n def writeAsClient(cls, socket, hostField, data, playerName):\n \"\"\"generated source for method writeAsClient\"\"\"\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('POST / HTTP/1.0\\r\\n')\n pw.print_('Accept: text/delim\\r\\n')\n pw.print_('Host: ' + hostField + '\\r\\n')\n pw.print_('Sender: GAMESERVER\\r\\n')\n pw.print_('Receiver: ' + playerName + '\\r\\n')\n pw.print_('Content-Type: text/acl\\r\\n')\n pw.print_('Content-Length: ' + len(data) + '\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n\n def writeAsServer(cls, socket, data):\n \"\"\"generated source for method writeAsServer\"\"\"\n pw = PrintWriter(socket.getOutputStream())\n pw.print_('HTTP/1.0 200 OK\\r\\n')\n pw.print_('Content-type: text/acl\\r\\n')\n pw.print_('Content-length: ' + len(data) + '\\r\\n')\n pw.print_('Access-Control-Allow-Origin: *\\r\\n')\n pw.print_('Access-Control-Allow-Methods: POST, GET, OPTIONS\\r\\n')\n pw.print_('Access-Control-Allow-Headers: Content-Type\\r\\n')\n pw.print_('Access-Control-Allow-Age: 86400\\r\\n')\n pw.print_('\\r\\n')\n pw.print_(data)\n pw.flush()\n", "source": "the_stack_v2_python_sparse", "source_path": "ggpy/cruft/autocode/HttpWriter.py", "source_repo": "hobson/ggpy", "split": "test", "star_events_count": 1} {"blob_id": "34cf7eaf13e5ddd7a3f842239df3205ec206b19b", "bodies": ["p[0] = p[1]\np_list = list(p)\nif isinstance(p_list[-1], dict):\n p[0].update(p_list[-1])\nelse:\n p[0]['database_name'] = p_list[-1]", "p[0] = p[1]\np_list = list(p)\np[0].update(p_list[-1])"], "bodies_text": "<|body_start_0|>\n p[0] = p[1]\n p_list = list(p)\n if isinstance(p_list[-1], dict):\n p[0].update(p_list[-1])\n else:\n p[0]['database_name'] = p_list[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n p[0] = p[1]\n p_list = list(p)\n p[0].update(p_list[-1])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Database", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Database:\n\n def p_database_base(self, p: List) -> None:\n \"\"\"database_base : CREATE DATABASE id | database_base clone\"\"\"\n <|body_0|>\n\n def p_expression_create_database(self, p: List) -> None:\n \"\"\"expr : expr database_base\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n p[0] = p[1]\n p_list = list(p)\n if isinstance(p_list[-1], dict):\n p[0].update(p_list[-1])\n else:\n p[0]['database_name'] = p_list[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n p[0] = p[1]\n p_list = list(p)\n p[0].update(p_list[-1])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000098", "length_bytes": 42571, "license_type": "permissive", "methods": [{"docstring": "database_base : CREATE DATABASE id | database_base clone", "name": "p_database_base", "signature": "def p_database_base(self, p: List) -> None"}, {"docstring": "expr : expr database_base", "name": "p_expression_create_database", "signature": "def p_expression_create_database(self, p: List) -> None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009099", "prompt": "Implement the Python class `Database` described below.\n\nClass description:\nImplement the Database class.\n\nMethod signatures and docstrings:\n- def p_database_base(self, p: List) -> None: database_base : CREATE DATABASE id | database_base clone\n- def p_expression_create_database(self, p: List) -> None: expr : expr database_base", "prompted_full_text": "Implement the Python class `Database` described below.\n\nClass description:\nImplement the Database class.\n\nMethod signatures and docstrings:\n- def p_database_base(self, p: List) -> None: database_base : CREATE DATABASE id | database_base clone\n- def p_expression_create_database(self, p: List) -> None: expr : expr database_base\n\n<|skeleton|>\nclass Database:\n\n def p_database_base(self, p: List) -> None:\n \"\"\"database_base : CREATE DATABASE id | database_base clone\"\"\"\n <|body_0|>\n\n def p_expression_create_database(self, p: List) -> None:\n \"\"\"expr : expr database_base\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n p[0] = p[1]\n p_list = list(p)\n if isinstance(p_list[-1], dict):\n p[0].update(p_list[-1])\n else:\n p[0]['database_name'] = p_list[-1]\n<|end_body_0|>\n\n<|body_start_1|>\n p[0] = p[1]\n p_list = list(p)\n p[0].update(p_list[-1])\n<|end_body_1|>\n", "revision_id": "8f69c9c3b58990f0d47dbe868fe4a572d51e2de7", "skeleton": "<|skeleton|>\nclass Database:\n\n def p_database_base(self, p: List) -> None:\n \"\"\"database_base : CREATE DATABASE id | database_base clone\"\"\"\n <|body_0|>\n\n def p_expression_create_database(self, p: List) -> None:\n \"\"\"expr : expr database_base\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Database:\n def p_database_base(self, p: List) -> None:\n \"\"\"database_base : CREATE DATABASE id | database_base clone\"\"\"\n p[0] = p[1]\n p_list = list(p)\n if isinstance(p_list[-1], dict):\n p[0].update(p_list[-1])\n else:\n p[0]['database_name'] = p_list[-1]\n\n def p_expression_create_database(self, p: List) -> None:\n \"\"\"expr : expr database_base\"\"\"\n p[0] = p[1]\n p_list = list(p)\n p[0].update(p_list[-1])\n", "source": "the_stack_v2_python_sparse", "source_path": "simple_ddl_parser/dialects/sql.py", "source_repo": "bjmc/simple-ddl-parser", "split": "test", "star_events_count": 0} {"blob_id": "ef10982b6e273e7456dff909c70456075cd34d19", "bodies": ["logs.log_info('You are using the vgK channel: Kv1p6 ')\nself.time_unit = 1000.0\nself.vrev = -65\nself.m = 1 / (1 + np.exp((V - -20.8) / -8.1))\nself.h = 1 / (1 + np.exp((V - -22.0) / 11.39))\nself._mpower = 1\nself._hpower = 1", "self._mInf = 1 / (1 + np.exp((V - -20.8) / -8.1))\nself._mTau = 30.0 / (1 + np.exp((V - -46.56) / 44.14))\nself._hInf = 1 / (1 + np.exp((V - -22.0) / 11.39))\nself._hTau = 5000.0 / (1 + np.exp((V - -46.56) / -44.14))"], "bodies_text": "<|body_start_0|>\n logs.log_info('You are using the vgK channel: Kv1p6 ')\n self.time_unit = 1000.0\n self.vrev = -65\n self.m = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self.h = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._mpower = 1\n self._hpower = 1\n<|end_body_0|>\n\n<|body_start_1|>\n self._mInf = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self._mTau = 30.0 / (1 + np.exp((V - -46.56) / 44.14))\n self._hInf = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._hTau = 5000.0 / (1 + np.exp((V - -46.56) / -44.14))\n<|end_body_1|>\n", "class_docstring": "Kv1.6 model from Grupe et al. 1990. Reference: A Grupe et. al; EMBO J. 1990 Jun", "class_name": "Kv1p6", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Kv1p6:\n \"\"\"Kv1.6 model from Grupe et al. 1990. Reference: A Grupe et. al; EMBO J. 1990 Jun\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n <|body_0|>\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logs.log_info('You are using the vgK channel: Kv1p6 ')\n self.time_unit = 1000.0\n self.vrev = -65\n self.m = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self.h = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._mpower = 1\n self._hpower = 1\n<|end_body_0|>\n\n<|body_start_1|>\n self._mInf = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self._mTau = 30.0 / (1 + np.exp((V - -46.56) / 44.14))\n self._hInf = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._hTau = 5000.0 / (1 + np.exp((V - -46.56) / -44.14))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000099", "length_bytes": 24227, "license_type": "no_license", "methods": [{"docstring": "Run initialization calculation for m and h gates of the channel at starting Vmem value.", "name": "_init_state", "signature": "def _init_state(self, V)"}, {"docstring": "Update the state of m and h gates of the channel given their present value and present simulation Vmem.", "name": "_calculate_state", "signature": "def _calculate_state(self, V)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036517", "prompt": "Implement the Python class `Kv1p6` described below.\n\nClass description:\nKv1.6 model from Grupe et al. 1990. Reference: A Grupe et. al; EMBO J. 1990 Jun\n\nMethod signatures and docstrings:\n- def _init_state(self, V): Run initialization calculation for m and h gates of the channel at starting Vmem value.\n- def _calculate_state(self, V): Update the state of m and h gates of the channel given their present value and present simulation Vmem.", "prompted_full_text": "Implement the Python class `Kv1p6` described below.\n\nClass description:\nKv1.6 model from Grupe et al. 1990. Reference: A Grupe et. al; EMBO J. 1990 Jun\n\nMethod signatures and docstrings:\n- def _init_state(self, V): Run initialization calculation for m and h gates of the channel at starting Vmem value.\n- def _calculate_state(self, V): Update the state of m and h gates of the channel given their present value and present simulation Vmem.\n\n<|skeleton|>\nclass Kv1p6:\n \"\"\"Kv1.6 model from Grupe et al. 1990. Reference: A Grupe et. al; EMBO J. 1990 Jun\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n <|body_0|>\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logs.log_info('You are using the vgK channel: Kv1p6 ')\n self.time_unit = 1000.0\n self.vrev = -65\n self.m = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self.h = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._mpower = 1\n self._hpower = 1\n<|end_body_0|>\n\n<|body_start_1|>\n self._mInf = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self._mTau = 30.0 / (1 + np.exp((V - -46.56) / 44.14))\n self._hInf = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._hTau = 5000.0 / (1 + np.exp((V - -46.56) / -44.14))\n<|end_body_1|>\n", "revision_id": "dd03ff5e3df3ef48d887a6566a6286fcd168880b", "skeleton": "<|skeleton|>\nclass Kv1p6:\n \"\"\"Kv1.6 model from Grupe et al. 1990. Reference: A Grupe et. al; EMBO J. 1990 Jun\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n <|body_0|>\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Kv1p6:\n \"\"\"Kv1.6 model from Grupe et al. 1990. Reference: A Grupe et. al; EMBO J. 1990 Jun\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n logs.log_info('You are using the vgK channel: Kv1p6 ')\n self.time_unit = 1000.0\n self.vrev = -65\n self.m = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self.h = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._mpower = 1\n self._hpower = 1\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n self._mInf = 1 / (1 + np.exp((V - -20.8) / -8.1))\n self._mTau = 30.0 / (1 + np.exp((V - -46.56) / 44.14))\n self._hInf = 1 / (1 + np.exp((V - -22.0) / 11.39))\n self._hTau = 5000.0 / (1 + np.exp((V - -46.56) / -44.14))\n", "source": "the_stack_v2_python_sparse", "source_path": "betse/science/channels/vg_k.py", "source_repo": "R-Stefano/betse-ml", "split": "test", "star_events_count": 0} {"blob_id": "af320d3e81c03bd6e050f83269ef5e34b1626056", "bodies": ["self.number = {}\nf = open(filename, 'r')\nfor l in f:\n info = l.strip().split('\\t')\n self.number[info[0]] = info[1:]", "try:\n return self.number[plate]\nexcept KeyError:\n return None"], "bodies_text": "<|body_start_0|>\n self.number = {}\n f = open(filename, 'r')\n for l in f:\n info = l.strip().split('\\t')\n self.number[info[0]] = info[1:]\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return self.number[plate]\n except KeyError:\n return None\n<|end_body_1|>\n", "class_docstring": "Get info about the plate and process it", "class_name": "Plates", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Plates:\n \"\"\"Get info about the plate and process it\"\"\"\n\n def __init__(self, filename):\n \"\"\"Read the file and split each word\"\"\"\n <|body_0|>\n\n def findByPlate(self, plate):\n \"\"\"Find the plate and return None if not found\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.number = {}\n f = open(filename, 'r')\n for l in f:\n info = l.strip().split('\\t')\n self.number[info[0]] = info[1:]\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return self.number[plate]\n except KeyError:\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000100", "length_bytes": 913, "license_type": "no_license", "methods": [{"docstring": "Read the file and split each word", "name": "__init__", "signature": "def __init__(self, filename)"}, {"docstring": "Find the plate and return None if not found", "name": "findByPlate", "signature": "def findByPlate(self, plate)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043025", "prompt": "Implement the Python class `Plates` described below.\n\nClass description:\nGet info about the plate and process it\n\nMethod signatures and docstrings:\n- def __init__(self, filename): Read the file and split each word\n- def findByPlate(self, plate): Find the plate and return None if not found", "prompted_full_text": "Implement the Python class `Plates` described below.\n\nClass description:\nGet info about the plate and process it\n\nMethod signatures and docstrings:\n- def __init__(self, filename): Read the file and split each word\n- def findByPlate(self, plate): Find the plate and return None if not found\n\n<|skeleton|>\nclass Plates:\n \"\"\"Get info about the plate and process it\"\"\"\n\n def __init__(self, filename):\n \"\"\"Read the file and split each word\"\"\"\n <|body_0|>\n\n def findByPlate(self, plate):\n \"\"\"Find the plate and return None if not found\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.number = {}\n f = open(filename, 'r')\n for l in f:\n info = l.strip().split('\\t')\n self.number[info[0]] = info[1:]\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n return self.number[plate]\n except KeyError:\n return None\n<|end_body_1|>\n", "revision_id": "e31680c173f5f17bb45963499bc0d66e4c34ad71", "skeleton": "<|skeleton|>\nclass Plates:\n \"\"\"Get info about the plate and process it\"\"\"\n\n def __init__(self, filename):\n \"\"\"Read the file and split each word\"\"\"\n <|body_0|>\n\n def findByPlate(self, plate):\n \"\"\"Find the plate and return None if not found\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Plates:\n \"\"\"Get info about the plate and process it\"\"\"\n\n def __init__(self, filename):\n \"\"\"Read the file and split each word\"\"\"\n self.number = {}\n f = open(filename, 'r')\n for l in f:\n info = l.strip().split('\\t')\n self.number[info[0]] = info[1:]\n\n def findByPlate(self, plate):\n \"\"\"Find the plate and return None if not found\"\"\"\n try:\n return self.number[plate]\n except KeyError:\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "labD6 Simple Plate Finder/LabD6.py", "source_repo": "alejopijuan/Python-Projects", "split": "test", "star_events_count": 1} {"blob_id": "4d17eddcf93388cb9791be32c523630e38408ed7", "bodies": ["memo = {('', '', ''): True}\n\ndef dfs(s1, s2, s3):\n key, flag = ((s1, s2, s3), False)\n if key in memo:\n return memo.get(key)\n if s1 and s3 and (s1[0] == s3[0]) and dfs(s1[1:], s2, s3[1:]):\n flag = True\n if s2 and s3 and (s2[0] == s3[0]) and dfs(s1, s2[1:], s3[1:]):\n flag = True\n memo[key] = flag\n return flag\nreturn dfs(s1, s2, s3)", "len1, len2, len3 = (len(s1), len(s2), len(s3))\nif len1 + len2 != len3:\n return False\nif not s1 or not s2:\n return s1 == s3 or s2 == s3\ndp = [[False for y in range(len2 + 1)] for x in range(len1 + 1)]\ndp[0][0] = True\nfor i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n if dp[i - 1][j] and s1[i - 1] == s3[i + j - 1] or (dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]):\n dp[i][j] = True\nreturn dp[-1][-1]"], "bodies_text": "<|body_start_0|>\n memo = {('', '', ''): True}\n\n def dfs(s1, s2, s3):\n key, flag = ((s1, s2, s3), False)\n if key in memo:\n return memo.get(key)\n if s1 and s3 and (s1[0] == s3[0]) and dfs(s1[1:], s2, s3[1:]):\n flag = True\n if s2 and s3 and (s2[0] == s3[0]) and dfs(s1, s2[1:], s3[1:]):\n flag = True\n memo[key] = flag\n return flag\n return dfs(s1, s2, s3)\n<|end_body_0|>\n\n<|body_start_1|>\n len1, len2, len3 = (len(s1), len(s2), len(s3))\n if len1 + len2 != len3:\n return False\n if not s1 or not s2:\n return s1 == s3 or s2 == s3\n dp = [[False for y in range(len2 + 1)] for x in range(len1 + 1)]\n dp[0][0] = True\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n if dp[i - 1][j] and s1[i - 1] == s3[i + j - 1] or (dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]):\n dp[i][j] = True\n return dp[-1][-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isInterleave1(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"DFS+记忆优化 递归树参考:https://leetcode-cn.com/problems/interleaving-string/solution/shou-hua-tu- jie-dfshui-su-dfsji-yi-hua-by-hyj8/\"\"\"\n <|body_0|>\n\n def isInterleave2(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"类似路径问题,使用DP 1.定义:dp[i][j]为s1前i个字符和s2[j]前j个字符是否能组成s3前i+j个字符 2.递推公式: dp[i][j] = dp[i-1][j]&&s1[i-1]==s3[i+j-1] | dp[i][j-1]&&s2[j-1]==s3[i+j-1] 参考:https://leetcode-cn.com/problems/interleaving-string/solution/lei-si-lu-jing-wen-ti-zhao-zhun-zhuang-tai-fang-ch/\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n memo = {('', '', ''): True}\n\n def dfs(s1, s2, s3):\n key, flag = ((s1, s2, s3), False)\n if key in memo:\n return memo.get(key)\n if s1 and s3 and (s1[0] == s3[0]) and dfs(s1[1:], s2, s3[1:]):\n flag = True\n if s2 and s3 and (s2[0] == s3[0]) and dfs(s1, s2[1:], s3[1:]):\n flag = True\n memo[key] = flag\n return flag\n return dfs(s1, s2, s3)\n<|end_body_0|>\n\n<|body_start_1|>\n len1, len2, len3 = (len(s1), len(s2), len(s3))\n if len1 + len2 != len3:\n return False\n if not s1 or not s2:\n return s1 == s3 or s2 == s3\n dp = [[False for y in range(len2 + 1)] for x in range(len1 + 1)]\n dp[0][0] = True\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n if dp[i - 1][j] and s1[i - 1] == s3[i + j - 1] or (dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]):\n dp[i][j] = True\n return dp[-1][-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000101", "length_bytes": 2889, "license_type": "no_license", "methods": [{"docstring": "DFS+记忆优化 递归树参考:https://leetcode-cn.com/problems/interleaving-string/solution/shou-hua-tu- jie-dfshui-su-dfsji-yi-hua-by-hyj8/", "name": "isInterleave1", "signature": "def isInterleave1(self, s1: str, s2: str, s3: str) -> bool"}, {"docstring": "类似路径问题,使用DP 1.定义:dp[i][j]为s1前i个字符和s2[j]前j个字符是否能组成s3前i+j个字符 2.递推公式: dp[i][j] = dp[i-1][j]&&s1[i-1]==s3[i+j-1] | dp[i][j-1]&&s2[j-1]==s3[i+j-1] 参考:https://leetcode-cn.com/problems/interleaving-string/solution/lei-si-lu-jing-wen-ti-zhao-zhun-zhuang-tai-fang-ch/", "name": "isInterleave2", "signature": "def isInterleave2(self, s1: str, s2: str, s3: str) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052227", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isInterleave1(self, s1: str, s2: str, s3: str) -> bool: DFS+记忆优化 递归树参考:https://leetcode-cn.com/problems/interleaving-string/solution/shou-hua-tu- jie-dfshui-su-dfsji-yi-hua-by-hyj8/\n- def isInterleave2(self, s1: str, s2: str, s3: str) -> bool: 类似路径问题,使用DP 1.定义:dp[i][j]为s1前i个字符和s2[j]前j个字符是否能组成s3前i+j个字符 2.递推公式: dp[i][j] = dp[i-1][j]&&s1[i-1]==s3[i+j-1] | dp[i][j-1]&&s2[j-1]==s3[i+j-1] 参考:https://leetcode-cn.com/problems/interleaving-string/solution/lei-si-lu-jing-wen-ti-zhao-zhun-zhuang-tai-fang-ch/", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isInterleave1(self, s1: str, s2: str, s3: str) -> bool: DFS+记忆优化 递归树参考:https://leetcode-cn.com/problems/interleaving-string/solution/shou-hua-tu- jie-dfshui-su-dfsji-yi-hua-by-hyj8/\n- def isInterleave2(self, s1: str, s2: str, s3: str) -> bool: 类似路径问题,使用DP 1.定义:dp[i][j]为s1前i个字符和s2[j]前j个字符是否能组成s3前i+j个字符 2.递推公式: dp[i][j] = dp[i-1][j]&&s1[i-1]==s3[i+j-1] | dp[i][j-1]&&s2[j-1]==s3[i+j-1] 参考:https://leetcode-cn.com/problems/interleaving-string/solution/lei-si-lu-jing-wen-ti-zhao-zhun-zhuang-tai-fang-ch/\n\n<|skeleton|>\nclass Solution:\n\n def isInterleave1(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"DFS+记忆优化 递归树参考:https://leetcode-cn.com/problems/interleaving-string/solution/shou-hua-tu- jie-dfshui-su-dfsji-yi-hua-by-hyj8/\"\"\"\n <|body_0|>\n\n def isInterleave2(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"类似路径问题,使用DP 1.定义:dp[i][j]为s1前i个字符和s2[j]前j个字符是否能组成s3前i+j个字符 2.递推公式: dp[i][j] = dp[i-1][j]&&s1[i-1]==s3[i+j-1] | dp[i][j-1]&&s2[j-1]==s3[i+j-1] 参考:https://leetcode-cn.com/problems/interleaving-string/solution/lei-si-lu-jing-wen-ti-zhao-zhun-zhuang-tai-fang-ch/\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n memo = {('', '', ''): True}\n\n def dfs(s1, s2, s3):\n key, flag = ((s1, s2, s3), False)\n if key in memo:\n return memo.get(key)\n if s1 and s3 and (s1[0] == s3[0]) and dfs(s1[1:], s2, s3[1:]):\n flag = True\n if s2 and s3 and (s2[0] == s3[0]) and dfs(s1, s2[1:], s3[1:]):\n flag = True\n memo[key] = flag\n return flag\n return dfs(s1, s2, s3)\n<|end_body_0|>\n\n<|body_start_1|>\n len1, len2, len3 = (len(s1), len(s2), len(s3))\n if len1 + len2 != len3:\n return False\n if not s1 or not s2:\n return s1 == s3 or s2 == s3\n dp = [[False for y in range(len2 + 1)] for x in range(len1 + 1)]\n dp[0][0] = True\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n if dp[i - 1][j] and s1[i - 1] == s3[i + j - 1] or (dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]):\n dp[i][j] = True\n return dp[-1][-1]\n<|end_body_1|>\n", "revision_id": "2bbb1640589aab34f2bc42489283033cc11fb885", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isInterleave1(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"DFS+记忆优化 递归树参考:https://leetcode-cn.com/problems/interleaving-string/solution/shou-hua-tu- jie-dfshui-su-dfsji-yi-hua-by-hyj8/\"\"\"\n <|body_0|>\n\n def isInterleave2(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"类似路径问题,使用DP 1.定义:dp[i][j]为s1前i个字符和s2[j]前j个字符是否能组成s3前i+j个字符 2.递推公式: dp[i][j] = dp[i-1][j]&&s1[i-1]==s3[i+j-1] | dp[i][j-1]&&s2[j-1]==s3[i+j-1] 参考:https://leetcode-cn.com/problems/interleaving-string/solution/lei-si-lu-jing-wen-ti-zhao-zhun-zhuang-tai-fang-ch/\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isInterleave1(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"DFS+记忆优化 递归树参考:https://leetcode-cn.com/problems/interleaving-string/solution/shou-hua-tu- jie-dfshui-su-dfsji-yi-hua-by-hyj8/\"\"\"\n memo = {('', '', ''): True}\n\n def dfs(s1, s2, s3):\n key, flag = ((s1, s2, s3), False)\n if key in memo:\n return memo.get(key)\n if s1 and s3 and (s1[0] == s3[0]) and dfs(s1[1:], s2, s3[1:]):\n flag = True\n if s2 and s3 and (s2[0] == s3[0]) and dfs(s1, s2[1:], s3[1:]):\n flag = True\n memo[key] = flag\n return flag\n return dfs(s1, s2, s3)\n\n def isInterleave2(self, s1: str, s2: str, s3: str) -> bool:\n \"\"\"类似路径问题,使用DP 1.定义:dp[i][j]为s1前i个字符和s2[j]前j个字符是否能组成s3前i+j个字符 2.递推公式: dp[i][j] = dp[i-1][j]&&s1[i-1]==s3[i+j-1] | dp[i][j-1]&&s2[j-1]==s3[i+j-1] 参考:https://leetcode-cn.com/problems/interleaving-string/solution/lei-si-lu-jing-wen-ti-zhao-zhun-zhuang-tai-fang-ch/\"\"\"\n len1, len2, len3 = (len(s1), len(s2), len(s3))\n if len1 + len2 != len3:\n return False\n if not s1 or not s2:\n return s1 == s3 or s2 == s3\n dp = [[False for y in range(len2 + 1)] for x in range(len1 + 1)]\n dp[0][0] = True\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n if dp[i - 1][j] and s1[i - 1] == s3[i + j - 1] or (dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]):\n dp[i][j] = True\n return dp[-1][-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "097_interleaving-string.py", "source_repo": "helloocc/algorithm", "split": "test", "star_events_count": 1} {"blob_id": "fd94796047c557b42d455180121d18b4c96ee72f", "bodies": ["from scoop.content.models.picture import Picture\nuuid = self.value\ncss_class = '{0}{1}'.format(' ' if 'class' in self.kwargs else '', self.kwargs.get('class', ''))\nimage = Picture.objects.get_by_uuid(uuid)\nreturn {'image': image, 'class': css_class}", "base = super(AnimationInline, self).get_template_name()[0]\npath = 'content/{}'.format(base)\nreturn path"], "bodies_text": "<|body_start_0|>\n from scoop.content.models.picture import Picture\n uuid = self.value\n css_class = '{0}{1}'.format(' ' if 'class' in self.kwargs else '', self.kwargs.get('class', ''))\n image = Picture.objects.get_by_uuid(uuid)\n return {'image': image, 'class': css_class}\n<|end_body_0|>\n\n<|body_start_1|>\n base = super(AnimationInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n<|end_body_1|>\n", "class_docstring": "Inline d'insertion d'animations Format : {{animation imageuuid [class=css]}} Exemple : {{animation dF4y8P class=\"bordered\"}}", "class_name": "AnimationInline", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AnimationInline:\n \"\"\"Inline d'insertion d'animations Format : {{animation imageuuid [class=css]}} Exemple : {{animation dF4y8P class=\"bordered\"}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n <|body_0|>\n\n def get_template_name(self):\n \"\"\"Renvoyer le chemin du template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from scoop.content.models.picture import Picture\n uuid = self.value\n css_class = '{0}{1}'.format(' ' if 'class' in self.kwargs else '', self.kwargs.get('class', ''))\n image = Picture.objects.get_by_uuid(uuid)\n return {'image': image, 'class': css_class}\n<|end_body_0|>\n\n<|body_start_1|>\n base = super(AnimationInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000102", "length_bytes": 6816, "license_type": "no_license", "methods": [{"docstring": "Renvoyer le contexte de rendu de l'inline", "name": "get_context", "signature": "def get_context(self)"}, {"docstring": "Renvoyer le chemin du template", "name": "get_template_name", "signature": "def get_template_name(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_025313", "prompt": "Implement the Python class `AnimationInline` described below.\n\nClass description:\nInline d'insertion d'animations Format : {{animation imageuuid [class=css]}} Exemple : {{animation dF4y8P class=\"bordered\"}}\n\nMethod signatures and docstrings:\n- def get_context(self): Renvoyer le contexte de rendu de l'inline\n- def get_template_name(self): Renvoyer le chemin du template", "prompted_full_text": "Implement the Python class `AnimationInline` described below.\n\nClass description:\nInline d'insertion d'animations Format : {{animation imageuuid [class=css]}} Exemple : {{animation dF4y8P class=\"bordered\"}}\n\nMethod signatures and docstrings:\n- def get_context(self): Renvoyer le contexte de rendu de l'inline\n- def get_template_name(self): Renvoyer le chemin du template\n\n<|skeleton|>\nclass AnimationInline:\n \"\"\"Inline d'insertion d'animations Format : {{animation imageuuid [class=css]}} Exemple : {{animation dF4y8P class=\"bordered\"}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n <|body_0|>\n\n def get_template_name(self):\n \"\"\"Renvoyer le chemin du template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from scoop.content.models.picture import Picture\n uuid = self.value\n css_class = '{0}{1}'.format(' ' if 'class' in self.kwargs else '', self.kwargs.get('class', ''))\n image = Picture.objects.get_by_uuid(uuid)\n return {'image': image, 'class': css_class}\n<|end_body_0|>\n\n<|body_start_1|>\n base = super(AnimationInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n<|end_body_1|>\n", "revision_id": "8cef6f6e89c1990e2b25f83e54e0c3481d83b6d7", "skeleton": "<|skeleton|>\nclass AnimationInline:\n \"\"\"Inline d'insertion d'animations Format : {{animation imageuuid [class=css]}} Exemple : {{animation dF4y8P class=\"bordered\"}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n <|body_0|>\n\n def get_template_name(self):\n \"\"\"Renvoyer le chemin du template\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AnimationInline:\n \"\"\"Inline d'insertion d'animations Format : {{animation imageuuid [class=css]}} Exemple : {{animation dF4y8P class=\"bordered\"}}\"\"\"\n\n def get_context(self):\n \"\"\"Renvoyer le contexte de rendu de l'inline\"\"\"\n from scoop.content.models.picture import Picture\n uuid = self.value\n css_class = '{0}{1}'.format(' ' if 'class' in self.kwargs else '', self.kwargs.get('class', ''))\n image = Picture.objects.get_by_uuid(uuid)\n return {'image': image, 'class': css_class}\n\n def get_template_name(self):\n \"\"\"Renvoyer le chemin du template\"\"\"\n base = super(AnimationInline, self).get_template_name()[0]\n path = 'content/{}'.format(base)\n return path\n", "source": "the_stack_v2_python_sparse", "source_path": "scoop/content/util/inlines.py", "source_repo": "artscoop/scoop", "split": "test", "star_events_count": 0} {"blob_id": "1ddce4275ec1fe06af7697feb71879202e8574c2", "bodies": ["with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, active=0, email=prepare_email(length=10))\nwith allure.step('CREATE SECONDARY USER IN DB'):\n user_name_new = prepare_user_name(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name_new, user_pass=prepare_password(length=11), access=1, email=prepare_email(length=11), active=0)\nwith allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\nwith allure.step('DELETE SECONDARY USER'):\n resp = self.api_client.delete_user(user_name=user_name_new)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name_new).all()\n assert not mysql_data, f\"User '{user_name_new}' find in DB but is not\"\n assert resp.status_code == 400, f'Response return status code {resp.status_code}, expected 400'", "with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\nwith allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\nwith allure.step('DELETE MAIN USER'):\n resp = self.api_client.delete_user(user_name=user_name)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name).all()\n assert not mysql_data, f\"User '{user_name}' find in DB but is not\"\n assert resp.status_code == 204, f'Response return status code {resp.status_code}, expected 204'", "with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\nwith allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\nwith allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=prepare_user_name(length=11))\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'", "with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\nwith allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\nwith allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=None)\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'"], "bodies_text": "<|body_start_0|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, active=0, email=prepare_email(length=10))\n with allure.step('CREATE SECONDARY USER IN DB'):\n user_name_new = prepare_user_name(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name_new, user_pass=prepare_password(length=11), access=1, email=prepare_email(length=11), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE SECONDARY USER'):\n resp = self.api_client.delete_user(user_name=user_name_new)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name_new).all()\n assert not mysql_data, f\"User '{user_name_new}' find in DB but is not\"\n assert resp.status_code == 400, f'Response return status code {resp.status_code}, expected 400'\n<|end_body_0|>\n\n<|body_start_1|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE MAIN USER'):\n resp = self.api_client.delete_user(user_name=user_name)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name).all()\n assert not mysql_data, f\"User '{user_name}' find in DB but is not\"\n assert resp.status_code == 204, f'Response return status code {resp.status_code}, expected 204'\n<|end_body_1|>\n\n<|body_start_2|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=prepare_user_name(length=11))\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n<|end_body_2|>\n\n<|body_start_3|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=None)\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n<|end_body_3|>\n", "class_docstring": "There are 4 test-cases", "class_name": "TestApiDeleteUser", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestApiDeleteUser:\n \"\"\"There are 4 test-cases\"\"\"\n\n def test_delete_exist_user(self):\n \"\"\"Test to remove secondary user by primary user\"\"\"\n <|body_0|>\n\n def test_delete_login_user(self):\n \"\"\"Test to remove primary user\"\"\"\n <|body_1|>\n\n def test_delete_non_existent_user(self):\n \"\"\"Test to remove not created user in DB\"\"\"\n <|body_2|>\n\n def test_delete_empty_name(self):\n \"\"\"Test to remove empty user in DB\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, active=0, email=prepare_email(length=10))\n with allure.step('CREATE SECONDARY USER IN DB'):\n user_name_new = prepare_user_name(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name_new, user_pass=prepare_password(length=11), access=1, email=prepare_email(length=11), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE SECONDARY USER'):\n resp = self.api_client.delete_user(user_name=user_name_new)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name_new).all()\n assert not mysql_data, f\"User '{user_name_new}' find in DB but is not\"\n assert resp.status_code == 400, f'Response return status code {resp.status_code}, expected 400'\n<|end_body_0|>\n\n<|body_start_1|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE MAIN USER'):\n resp = self.api_client.delete_user(user_name=user_name)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name).all()\n assert not mysql_data, f\"User '{user_name}' find in DB but is not\"\n assert resp.status_code == 204, f'Response return status code {resp.status_code}, expected 204'\n<|end_body_1|>\n\n<|body_start_2|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=prepare_user_name(length=11))\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n<|end_body_2|>\n\n<|body_start_3|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=None)\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000103", "length_bytes": 4531, "license_type": "no_license", "methods": [{"docstring": "Test to remove secondary user by primary user", "name": "test_delete_exist_user", "signature": "def test_delete_exist_user(self)"}, {"docstring": "Test to remove primary user", "name": "test_delete_login_user", "signature": "def test_delete_login_user(self)"}, {"docstring": "Test to remove not created user in DB", "name": "test_delete_non_existent_user", "signature": "def test_delete_non_existent_user(self)"}, {"docstring": "Test to remove empty user in DB", "name": "test_delete_empty_name", "signature": "def test_delete_empty_name(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_003101", "prompt": "Implement the Python class `TestApiDeleteUser` described below.\n\nClass description:\nThere are 4 test-cases\n\nMethod signatures and docstrings:\n- def test_delete_exist_user(self): Test to remove secondary user by primary user\n- def test_delete_login_user(self): Test to remove primary user\n- def test_delete_non_existent_user(self): Test to remove not created user in DB\n- def test_delete_empty_name(self): Test to remove empty user in DB", "prompted_full_text": "Implement the Python class `TestApiDeleteUser` described below.\n\nClass description:\nThere are 4 test-cases\n\nMethod signatures and docstrings:\n- def test_delete_exist_user(self): Test to remove secondary user by primary user\n- def test_delete_login_user(self): Test to remove primary user\n- def test_delete_non_existent_user(self): Test to remove not created user in DB\n- def test_delete_empty_name(self): Test to remove empty user in DB\n\n<|skeleton|>\nclass TestApiDeleteUser:\n \"\"\"There are 4 test-cases\"\"\"\n\n def test_delete_exist_user(self):\n \"\"\"Test to remove secondary user by primary user\"\"\"\n <|body_0|>\n\n def test_delete_login_user(self):\n \"\"\"Test to remove primary user\"\"\"\n <|body_1|>\n\n def test_delete_non_existent_user(self):\n \"\"\"Test to remove not created user in DB\"\"\"\n <|body_2|>\n\n def test_delete_empty_name(self):\n \"\"\"Test to remove empty user in DB\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, active=0, email=prepare_email(length=10))\n with allure.step('CREATE SECONDARY USER IN DB'):\n user_name_new = prepare_user_name(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name_new, user_pass=prepare_password(length=11), access=1, email=prepare_email(length=11), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE SECONDARY USER'):\n resp = self.api_client.delete_user(user_name=user_name_new)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name_new).all()\n assert not mysql_data, f\"User '{user_name_new}' find in DB but is not\"\n assert resp.status_code == 400, f'Response return status code {resp.status_code}, expected 400'\n<|end_body_0|>\n\n<|body_start_1|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE MAIN USER'):\n resp = self.api_client.delete_user(user_name=user_name)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name).all()\n assert not mysql_data, f\"User '{user_name}' find in DB but is not\"\n assert resp.status_code == 204, f'Response return status code {resp.status_code}, expected 204'\n<|end_body_1|>\n\n<|body_start_2|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=prepare_user_name(length=11))\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n<|end_body_2|>\n\n<|body_start_3|>\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=None)\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n<|end_body_3|>\n", "revision_id": "37444490cb4de7e4f2bcafe2b9b1d83f0a8d6b68", "skeleton": "<|skeleton|>\nclass TestApiDeleteUser:\n \"\"\"There are 4 test-cases\"\"\"\n\n def test_delete_exist_user(self):\n \"\"\"Test to remove secondary user by primary user\"\"\"\n <|body_0|>\n\n def test_delete_login_user(self):\n \"\"\"Test to remove primary user\"\"\"\n <|body_1|>\n\n def test_delete_non_existent_user(self):\n \"\"\"Test to remove not created user in DB\"\"\"\n <|body_2|>\n\n def test_delete_empty_name(self):\n \"\"\"Test to remove empty user in DB\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestApiDeleteUser:\n \"\"\"There are 4 test-cases\"\"\"\n\n def test_delete_exist_user(self):\n \"\"\"Test to remove secondary user by primary user\"\"\"\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, active=0, email=prepare_email(length=10))\n with allure.step('CREATE SECONDARY USER IN DB'):\n user_name_new = prepare_user_name(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name_new, user_pass=prepare_password(length=11), access=1, email=prepare_email(length=11), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE SECONDARY USER'):\n resp = self.api_client.delete_user(user_name=user_name_new)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name_new).all()\n assert not mysql_data, f\"User '{user_name_new}' find in DB but is not\"\n assert resp.status_code == 400, f'Response return status code {resp.status_code}, expected 400'\n\n def test_delete_login_user(self):\n \"\"\"Test to remove primary user\"\"\"\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE MAIN USER'):\n resp = self.api_client.delete_user(user_name=user_name)\n mysql_data = self.mysql_client.session.query(DataBaseUsers).order_by(DataBaseUsers.id.desc()).filter_by(username=user_name).all()\n assert not mysql_data, f\"User '{user_name}' find in DB but is not\"\n assert resp.status_code == 204, f'Response return status code {resp.status_code}, expected 204'\n\n def test_delete_non_existent_user(self):\n \"\"\"Test to remove not created user in DB\"\"\"\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=prepare_user_name(length=11))\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n\n def test_delete_empty_name(self):\n \"\"\"Test to remove empty user in DB\"\"\"\n with allure.step('CREATE MAIN USER IN DB'):\n user_name = prepare_user_name(length=10)\n user_pass = prepare_password(length=10)\n self.mysql_builder.add_user_in_base(user_name=user_name, user_pass=user_pass, access=1, email=prepare_email(length=10), active=0)\n with allure.step('LOGIN IN APP'):\n self.api_client.login_in_api(user_pass=user_pass, user_name=user_name)\n with allure.step('DELETE NOT CREATED USER'):\n resp = self.api_client.delete_user(user_name=None)\n assert resp.status_code == 404, f'Response return status code {resp.status_code}, expected 404'\n", "source": "the_stack_v2_python_sparse", "source_path": "final_project/code/test_api/test_delete_user.py", "source_repo": "yaroslav-94/2021-1-MAILRU-SDET-Python-Y-Yanin", "split": "test", "star_events_count": 0} {"blob_id": "30d2e156aa3e4b2352c8b054cd03835d444e1a85", "bodies": ["pdata_current_point = pv.PolyData(curr_animal_point)\npc_current_point = pdata_current_point.glyph(scale=False, geom=point_location_circle)\nself.plots_data[plot_name] = {'pdata_current_point': pdata_current_point, 'pc_current_point': pc_current_point}\nself.plots[plot_name] = self.p.add_mesh(pc_current_point, name=plot_name, render=render, **{'color': 'green', 'ambient': 0.6, 'opacity': 0.5, 'show_edges': True, 'edge_color': [0.05, 0.8, 0.08], 'line_width': 3.0, 'nan_opacity': 0.0, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\nreturn (self.plots[plot_name], self.plots_data[plot_name])", "point_cloud_fixedSegements_positionTrail = np.column_stack((arr_x, arr_y, arr_z))\npdata_positionTrail = pv.PolyData(point_cloud_fixedSegements_positionTrail.copy())\nactive_num_samples = len(arr_x)\nif trail_fade_values is not None:\n pdata_positionTrail.point_data['pho_fade_values'] = trail_fade_values[-active_num_samples:]\n scalars_arg = 'pho_fade_values'\nelse:\n scalars_arg = None\nif trail_point_size_values is not None:\n pdata_positionTrail.point_data['pho_size_values'] = trail_point_size_values[-active_num_samples:]\n point_size_scale_arg = 'pho_size_values'\nelse:\n point_size_scale_arg = None\npc_positionTrail = pdata_positionTrail.glyph(scale=point_size_scale_arg, geom=point_location_trail_circle)\nself.plots_data[plot_name] = {'point_cloud_fixedSegements_positionTrail': point_cloud_fixedSegements_positionTrail, 'pdata_positionTrail': pdata_positionTrail, 'pc_positionTrail': pc_positionTrail}\nself.plots[plot_name] = self.p.add_mesh(pc_positionTrail, name=plot_name, render=render, **{'ambient': 0.6, 'opacity': 'linear_r', 'scalars': scalars_arg, 'nan_opacity': 0.0, 'show_edges': False, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\nreturn (self.plots[plot_name], self.plots_data[plot_name])"], "bodies_text": "<|body_start_0|>\n pdata_current_point = pv.PolyData(curr_animal_point)\n pc_current_point = pdata_current_point.glyph(scale=False, geom=point_location_circle)\n self.plots_data[plot_name] = {'pdata_current_point': pdata_current_point, 'pc_current_point': pc_current_point}\n self.plots[plot_name] = self.p.add_mesh(pc_current_point, name=plot_name, render=render, **{'color': 'green', 'ambient': 0.6, 'opacity': 0.5, 'show_edges': True, 'edge_color': [0.05, 0.8, 0.08], 'line_width': 3.0, 'nan_opacity': 0.0, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n<|end_body_0|>\n\n<|body_start_1|>\n point_cloud_fixedSegements_positionTrail = np.column_stack((arr_x, arr_y, arr_z))\n pdata_positionTrail = pv.PolyData(point_cloud_fixedSegements_positionTrail.copy())\n active_num_samples = len(arr_x)\n if trail_fade_values is not None:\n pdata_positionTrail.point_data['pho_fade_values'] = trail_fade_values[-active_num_samples:]\n scalars_arg = 'pho_fade_values'\n else:\n scalars_arg = None\n if trail_point_size_values is not None:\n pdata_positionTrail.point_data['pho_size_values'] = trail_point_size_values[-active_num_samples:]\n point_size_scale_arg = 'pho_size_values'\n else:\n point_size_scale_arg = None\n pc_positionTrail = pdata_positionTrail.glyph(scale=point_size_scale_arg, geom=point_location_trail_circle)\n self.plots_data[plot_name] = {'point_cloud_fixedSegements_positionTrail': point_cloud_fixedSegements_positionTrail, 'pdata_positionTrail': pdata_positionTrail, 'pc_positionTrail': pc_positionTrail}\n self.plots[plot_name] = self.p.add_mesh(pc_positionTrail, name=plot_name, render=render, **{'ambient': 0.6, 'opacity': 'linear_r', 'scalars': scalars_arg, 'nan_opacity': 0.0, 'show_edges': False, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n<|end_body_1|>\n", "class_docstring": "Implementor can render location points and paths/trails in the plotter Requires (Implementor Must Provide): p plots plots_data Provides: Provided Properties: None Provided Methods: perform_plot_location_point(...) perform_plot_location_trail(...)", "class_name": "InteractivePyvistaPlotter_PointAndPathPlottingMixin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InteractivePyvistaPlotter_PointAndPathPlottingMixin:\n \"\"\"Implementor can render location points and paths/trails in the plotter Requires (Implementor Must Provide): p plots plots_data Provides: Provided Properties: None Provided Methods: perform_plot_location_point(...) perform_plot_location_trail(...)\"\"\"\n\n def perform_plot_location_point(self, plot_name, curr_animal_point, render=True, **kwargs):\n \"\"\"will render a flat indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n <|body_0|>\n\n def perform_plot_location_trail(self, plot_name, arr_x, arr_y, arr_z, render=True, trail_fade_values=None, trail_point_size_values=None, **kwargs):\n \"\"\"will render a series of points as a trajectory/path given arr_x, arr_y, and arr_z vectors of the same length. indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pdata_current_point = pv.PolyData(curr_animal_point)\n pc_current_point = pdata_current_point.glyph(scale=False, geom=point_location_circle)\n self.plots_data[plot_name] = {'pdata_current_point': pdata_current_point, 'pc_current_point': pc_current_point}\n self.plots[plot_name] = self.p.add_mesh(pc_current_point, name=plot_name, render=render, **{'color': 'green', 'ambient': 0.6, 'opacity': 0.5, 'show_edges': True, 'edge_color': [0.05, 0.8, 0.08], 'line_width': 3.0, 'nan_opacity': 0.0, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n<|end_body_0|>\n\n<|body_start_1|>\n point_cloud_fixedSegements_positionTrail = np.column_stack((arr_x, arr_y, arr_z))\n pdata_positionTrail = pv.PolyData(point_cloud_fixedSegements_positionTrail.copy())\n active_num_samples = len(arr_x)\n if trail_fade_values is not None:\n pdata_positionTrail.point_data['pho_fade_values'] = trail_fade_values[-active_num_samples:]\n scalars_arg = 'pho_fade_values'\n else:\n scalars_arg = None\n if trail_point_size_values is not None:\n pdata_positionTrail.point_data['pho_size_values'] = trail_point_size_values[-active_num_samples:]\n point_size_scale_arg = 'pho_size_values'\n else:\n point_size_scale_arg = None\n pc_positionTrail = pdata_positionTrail.glyph(scale=point_size_scale_arg, geom=point_location_trail_circle)\n self.plots_data[plot_name] = {'point_cloud_fixedSegements_positionTrail': point_cloud_fixedSegements_positionTrail, 'pdata_positionTrail': pdata_positionTrail, 'pc_positionTrail': pc_positionTrail}\n self.plots[plot_name] = self.p.add_mesh(pc_positionTrail, name=plot_name, render=render, **{'ambient': 0.6, 'opacity': 'linear_r', 'scalars': scalars_arg, 'nan_opacity': 0.0, 'show_edges': False, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000104", "length_bytes": 7895, "license_type": "permissive", "methods": [{"docstring": "will render a flat indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.", "name": "perform_plot_location_point", "signature": "def perform_plot_location_point(self, plot_name, curr_animal_point, render=True, **kwargs)"}, {"docstring": "will render a series of points as a trajectory/path given arr_x, arr_y, and arr_z vectors of the same length. indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.", "name": "perform_plot_location_trail", "signature": "def perform_plot_location_trail(self, plot_name, arr_x, arr_y, arr_z, render=True, trail_fade_values=None, trail_point_size_values=None, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029219", "prompt": "Implement the Python class `InteractivePyvistaPlotter_PointAndPathPlottingMixin` described below.\n\nClass description:\nImplementor can render location points and paths/trails in the plotter Requires (Implementor Must Provide): p plots plots_data Provides: Provided Properties: None Provided Methods: perform_plot_location_point(...) perform_plot_location_trail(...)\n\nMethod signatures and docstrings:\n- def perform_plot_location_point(self, plot_name, curr_animal_point, render=True, **kwargs): will render a flat indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\n- def perform_plot_location_trail(self, plot_name, arr_x, arr_y, arr_z, render=True, trail_fade_values=None, trail_point_size_values=None, **kwargs): will render a series of points as a trajectory/path given arr_x, arr_y, and arr_z vectors of the same length. indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.", "prompted_full_text": "Implement the Python class `InteractivePyvistaPlotter_PointAndPathPlottingMixin` described below.\n\nClass description:\nImplementor can render location points and paths/trails in the plotter Requires (Implementor Must Provide): p plots plots_data Provides: Provided Properties: None Provided Methods: perform_plot_location_point(...) perform_plot_location_trail(...)\n\nMethod signatures and docstrings:\n- def perform_plot_location_point(self, plot_name, curr_animal_point, render=True, **kwargs): will render a flat indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\n- def perform_plot_location_trail(self, plot_name, arr_x, arr_y, arr_z, render=True, trail_fade_values=None, trail_point_size_values=None, **kwargs): will render a series of points as a trajectory/path given arr_x, arr_y, and arr_z vectors of the same length. indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\n\n<|skeleton|>\nclass InteractivePyvistaPlotter_PointAndPathPlottingMixin:\n \"\"\"Implementor can render location points and paths/trails in the plotter Requires (Implementor Must Provide): p plots plots_data Provides: Provided Properties: None Provided Methods: perform_plot_location_point(...) perform_plot_location_trail(...)\"\"\"\n\n def perform_plot_location_point(self, plot_name, curr_animal_point, render=True, **kwargs):\n \"\"\"will render a flat indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n <|body_0|>\n\n def perform_plot_location_trail(self, plot_name, arr_x, arr_y, arr_z, render=True, trail_fade_values=None, trail_point_size_values=None, **kwargs):\n \"\"\"will render a series of points as a trajectory/path given arr_x, arr_y, and arr_z vectors of the same length. indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pdata_current_point = pv.PolyData(curr_animal_point)\n pc_current_point = pdata_current_point.glyph(scale=False, geom=point_location_circle)\n self.plots_data[plot_name] = {'pdata_current_point': pdata_current_point, 'pc_current_point': pc_current_point}\n self.plots[plot_name] = self.p.add_mesh(pc_current_point, name=plot_name, render=render, **{'color': 'green', 'ambient': 0.6, 'opacity': 0.5, 'show_edges': True, 'edge_color': [0.05, 0.8, 0.08], 'line_width': 3.0, 'nan_opacity': 0.0, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n<|end_body_0|>\n\n<|body_start_1|>\n point_cloud_fixedSegements_positionTrail = np.column_stack((arr_x, arr_y, arr_z))\n pdata_positionTrail = pv.PolyData(point_cloud_fixedSegements_positionTrail.copy())\n active_num_samples = len(arr_x)\n if trail_fade_values is not None:\n pdata_positionTrail.point_data['pho_fade_values'] = trail_fade_values[-active_num_samples:]\n scalars_arg = 'pho_fade_values'\n else:\n scalars_arg = None\n if trail_point_size_values is not None:\n pdata_positionTrail.point_data['pho_size_values'] = trail_point_size_values[-active_num_samples:]\n point_size_scale_arg = 'pho_size_values'\n else:\n point_size_scale_arg = None\n pc_positionTrail = pdata_positionTrail.glyph(scale=point_size_scale_arg, geom=point_location_trail_circle)\n self.plots_data[plot_name] = {'point_cloud_fixedSegements_positionTrail': point_cloud_fixedSegements_positionTrail, 'pdata_positionTrail': pdata_positionTrail, 'pc_positionTrail': pc_positionTrail}\n self.plots[plot_name] = self.p.add_mesh(pc_positionTrail, name=plot_name, render=render, **{'ambient': 0.6, 'opacity': 'linear_r', 'scalars': scalars_arg, 'nan_opacity': 0.0, 'show_edges': False, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n<|end_body_1|>\n", "revision_id": "212399d826284b394fce8894ff1a93133aef783f", "skeleton": "<|skeleton|>\nclass InteractivePyvistaPlotter_PointAndPathPlottingMixin:\n \"\"\"Implementor can render location points and paths/trails in the plotter Requires (Implementor Must Provide): p plots plots_data Provides: Provided Properties: None Provided Methods: perform_plot_location_point(...) perform_plot_location_trail(...)\"\"\"\n\n def perform_plot_location_point(self, plot_name, curr_animal_point, render=True, **kwargs):\n \"\"\"will render a flat indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n <|body_0|>\n\n def perform_plot_location_trail(self, plot_name, arr_x, arr_y, arr_z, render=True, trail_fade_values=None, trail_point_size_values=None, **kwargs):\n \"\"\"will render a series of points as a trajectory/path given arr_x, arr_y, and arr_z vectors of the same length. indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InteractivePyvistaPlotter_PointAndPathPlottingMixin:\n \"\"\"Implementor can render location points and paths/trails in the plotter Requires (Implementor Must Provide): p plots plots_data Provides: Provided Properties: None Provided Methods: perform_plot_location_point(...) perform_plot_location_trail(...)\"\"\"\n\n def perform_plot_location_point(self, plot_name, curr_animal_point, render=True, **kwargs):\n \"\"\"will render a flat indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n pdata_current_point = pv.PolyData(curr_animal_point)\n pc_current_point = pdata_current_point.glyph(scale=False, geom=point_location_circle)\n self.plots_data[plot_name] = {'pdata_current_point': pdata_current_point, 'pc_current_point': pc_current_point}\n self.plots[plot_name] = self.p.add_mesh(pc_current_point, name=plot_name, render=render, **{'color': 'green', 'ambient': 0.6, 'opacity': 0.5, 'show_edges': True, 'edge_color': [0.05, 0.8, 0.08], 'line_width': 3.0, 'nan_opacity': 0.0, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n\n def perform_plot_location_trail(self, plot_name, arr_x, arr_y, arr_z, render=True, trail_fade_values=None, trail_point_size_values=None, **kwargs):\n \"\"\"will render a series of points as a trajectory/path given arr_x, arr_y, and arr_z vectors of the same length. indicator of a single point like is used for the animal's current location. Updates the existing plot if the same plot_name is reused.\"\"\"\n point_cloud_fixedSegements_positionTrail = np.column_stack((arr_x, arr_y, arr_z))\n pdata_positionTrail = pv.PolyData(point_cloud_fixedSegements_positionTrail.copy())\n active_num_samples = len(arr_x)\n if trail_fade_values is not None:\n pdata_positionTrail.point_data['pho_fade_values'] = trail_fade_values[-active_num_samples:]\n scalars_arg = 'pho_fade_values'\n else:\n scalars_arg = None\n if trail_point_size_values is not None:\n pdata_positionTrail.point_data['pho_size_values'] = trail_point_size_values[-active_num_samples:]\n point_size_scale_arg = 'pho_size_values'\n else:\n point_size_scale_arg = None\n pc_positionTrail = pdata_positionTrail.glyph(scale=point_size_scale_arg, geom=point_location_trail_circle)\n self.plots_data[plot_name] = {'point_cloud_fixedSegements_positionTrail': point_cloud_fixedSegements_positionTrail, 'pdata_positionTrail': pdata_positionTrail, 'pc_positionTrail': pc_positionTrail}\n self.plots[plot_name] = self.p.add_mesh(pc_positionTrail, name=plot_name, render=render, **{'ambient': 0.6, 'opacity': 'linear_r', 'scalars': scalars_arg, 'nan_opacity': 0.0, 'show_edges': False, 'render_lines_as_tubes': True, 'show_scalar_bar': False, 'use_transparency': True} | kwargs)\n return (self.plots[plot_name], self.plots_data[plot_name])\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pyphoplacecellanalysis/GUI/PyVista/InteractivePlotter/Mixins/InteractivePlotterMixins.py", "source_repo": "CommanderPho/pyPhoPlaceCellAnalysis", "split": "test", "star_events_count": 1} {"blob_id": "6225e590e6010b23c096c96b2d32a108de83c725", "bodies": ["self.narticulators = narticulators\nself.duration_ms = duration_ms\nself.time_points_ms = time_points_ms\nself.ntimepoints = len(time_points_ms)", "synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))\nseg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])\nreturn seg.rms"], "bodies_text": "<|body_start_0|>\n self.narticulators = narticulators\n self.duration_ms = duration_ms\n self.time_points_ms = time_points_ms\n self.ntimepoints = len(time_points_ms)\n<|end_body_0|>\n\n<|body_start_1|>\n synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))\n seg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])\n return seg.rms\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ParallelizableFitnessFunctionPhase0", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParallelizableFitnessFunctionPhase0:\n\n def __init__(self, narticulators, duration_ms, time_points_ms):\n \"\"\":param narticulators: How many articulators? :param duration_ms: The total ms of articulation we should create from each agent. :param time_points_ms: The time points (in ms) at which to change the values of each articulator.\"\"\"\n <|body_0|>\n\n def __call__(self, agent):\n \"\"\"This fitness function evaluates an agent on how much sound it makes when run through the articulatory synthesizer as a synthmat.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.narticulators = narticulators\n self.duration_ms = duration_ms\n self.time_points_ms = time_points_ms\n self.ntimepoints = len(time_points_ms)\n<|end_body_0|>\n\n<|body_start_1|>\n synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))\n seg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])\n return seg.rms\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000105", "length_bytes": 37918, "license_type": "permissive", "methods": [{"docstring": ":param narticulators: How many articulators? :param duration_ms: The total ms of articulation we should create from each agent. :param time_points_ms: The time points (in ms) at which to change the values of each articulator.", "name": "__init__", "signature": "def __init__(self, narticulators, duration_ms, time_points_ms)"}, {"docstring": "This fitness function evaluates an agent on how much sound it makes when run through the articulatory synthesizer as a synthmat.", "name": "__call__", "signature": "def __call__(self, agent)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029831", "prompt": "Implement the Python class `ParallelizableFitnessFunctionPhase0` described below.\n\nClass description:\nImplement the ParallelizableFitnessFunctionPhase0 class.\n\nMethod signatures and docstrings:\n- def __init__(self, narticulators, duration_ms, time_points_ms): :param narticulators: How many articulators? :param duration_ms: The total ms of articulation we should create from each agent. :param time_points_ms: The time points (in ms) at which to change the values of each articulator.\n- def __call__(self, agent): This fitness function evaluates an agent on how much sound it makes when run through the articulatory synthesizer as a synthmat.", "prompted_full_text": "Implement the Python class `ParallelizableFitnessFunctionPhase0` described below.\n\nClass description:\nImplement the ParallelizableFitnessFunctionPhase0 class.\n\nMethod signatures and docstrings:\n- def __init__(self, narticulators, duration_ms, time_points_ms): :param narticulators: How many articulators? :param duration_ms: The total ms of articulation we should create from each agent. :param time_points_ms: The time points (in ms) at which to change the values of each articulator.\n- def __call__(self, agent): This fitness function evaluates an agent on how much sound it makes when run through the articulatory synthesizer as a synthmat.\n\n<|skeleton|>\nclass ParallelizableFitnessFunctionPhase0:\n\n def __init__(self, narticulators, duration_ms, time_points_ms):\n \"\"\":param narticulators: How many articulators? :param duration_ms: The total ms of articulation we should create from each agent. :param time_points_ms: The time points (in ms) at which to change the values of each articulator.\"\"\"\n <|body_0|>\n\n def __call__(self, agent):\n \"\"\"This fitness function evaluates an agent on how much sound it makes when run through the articulatory synthesizer as a synthmat.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.narticulators = narticulators\n self.duration_ms = duration_ms\n self.time_points_ms = time_points_ms\n self.ntimepoints = len(time_points_ms)\n<|end_body_0|>\n\n<|body_start_1|>\n synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))\n seg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])\n return seg.rms\n<|end_body_1|>\n", "revision_id": "1edbb171a5405d2971227f2d2d83acb523c70034", "skeleton": "<|skeleton|>\nclass ParallelizableFitnessFunctionPhase0:\n\n def __init__(self, narticulators, duration_ms, time_points_ms):\n \"\"\":param narticulators: How many articulators? :param duration_ms: The total ms of articulation we should create from each agent. :param time_points_ms: The time points (in ms) at which to change the values of each articulator.\"\"\"\n <|body_0|>\n\n def __call__(self, agent):\n \"\"\"This fitness function evaluates an agent on how much sound it makes when run through the articulatory synthesizer as a synthmat.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ParallelizableFitnessFunctionPhase0:\n def __init__(self, narticulators, duration_ms, time_points_ms):\n \"\"\":param narticulators: How many articulators? :param duration_ms: The total ms of articulation we should create from each agent. :param time_points_ms: The time points (in ms) at which to change the values of each articulator.\"\"\"\n self.narticulators = narticulators\n self.duration_ms = duration_ms\n self.time_points_ms = time_points_ms\n self.ntimepoints = len(time_points_ms)\n\n def __call__(self, agent):\n \"\"\"This fitness function evaluates an agent on how much sound it makes when run through the articulatory synthesizer as a synthmat.\"\"\"\n synthmat = np.reshape(agent, (self.narticulators, self.ntimepoints))\n seg = synth.make_seg_from_synthmat(synthmat, self.duration_ms / 1000.0, [tp / 1000.0 for tp in self.time_points_ms])\n return seg.rms\n", "source": "the_stack_v2_python_sparse", "source_path": "Artie/internals/motorcortex/motorcortex.py", "source_repo": "MaxStrange/ArtieInfant", "split": "test", "star_events_count": 1} {"blob_id": "217a341aee4b7786ca130dac10d7d26db2465c58", "bodies": ["ext = []\nif self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\nreturn ext", "info = {}\nif pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\nelse:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\nif pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\nreturn info"], "bodies_text": "<|body_start_0|>\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n<|end_body_0|>\n\n<|body_start_1|>\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n<|end_body_1|>\n", "class_docstring": "Compute α but requires storing individual gradients.", "class_name": "AlphaExpensive", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n <|body_0|>\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n<|end_body_0|>\n\n<|body_start_1|>\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000106", "length_bytes": 16011, "license_type": "permissive", "methods": [{"docstring": "Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.", "name": "extensions", "signature": "def extensions(self, global_step)"}, {"docstring": "Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.", "name": "_fetch_values", "signature": "def _fetch_values(self, params, batch_loss, pos, global_step)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040001", "prompt": "Implement the Python class `AlphaExpensive` described below.\n\nClass description:\nCompute α but requires storing individual gradients.\n\nMethod signatures and docstrings:\n- def extensions(self, global_step): Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\n- def _fetch_values(self, params, batch_loss, pos, global_step): Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.", "prompted_full_text": "Implement the Python class `AlphaExpensive` described below.\n\nClass description:\nCompute α but requires storing individual gradients.\n\nMethod signatures and docstrings:\n- def extensions(self, global_step): Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\n- def _fetch_values(self, params, batch_loss, pos, global_step): Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\n\n<|skeleton|>\nclass AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n <|body_0|>\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n<|end_body_0|>\n\n<|body_start_1|>\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n<|end_body_1|>\n", "revision_id": "5bd5ab3cda03eda0b0bf276f29d5c28b83d70b06", "skeleton": "<|skeleton|>\nclass AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n <|body_0|>\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AlphaExpensive:\n \"\"\"Compute α but requires storing individual gradients.\"\"\"\n\n def extensions(self, global_step):\n \"\"\"Return list of BackPACK extensions required for the computation. Args: global_step (int): The current iteration number. Returns: list: (Potentially empty) list with required BackPACK quantities.\"\"\"\n ext = []\n if self._is_position(global_step, 'start') or self._is_position(global_step, 'end'):\n ext.append(extensions.BatchGrad())\n return ext\n\n def _fetch_values(self, params, batch_loss, pos, global_step):\n \"\"\"Fetch values for quadratic fit. Return as dictionary. The entry \"search_dir\" is only initialized if ``pos`` is ``\"start\"``.\"\"\"\n info = {}\n if pos in ['start', 'end']:\n info['f'] = batch_loss.item()\n info['var_f'] = get_individual_losses(global_step).var().item()\n info['params'] = {id(p): p.data.clone().detach() for p in params}\n info['grad'] = {id(p): p.grad.data.clone().detach() for p in params}\n batch_size = get_batch_size(global_step)\n info['batch_grad'] = {id(p): batch_size * p.grad_batch.data.clone().detach() for p in params}\n else:\n raise ValueError(f\"Invalid position '{pos}'. Expect {self._positions}.\")\n if pos == 'end':\n start_params, _ = self._get_info('params', end=False)\n end_params = info['params']\n search_dir = [end_params[key] - start_params[key] for key in start_params.keys()]\n for info_dict in [self._start_info, info]:\n grad = [info_dict['grad'][key] for key in start_params.keys()]\n batch_grad = [info_dict['batch_grad'][key] for key in start_params.keys()]\n info_dict['df'] = _projected_gradient(grad, search_dir)\n info_dict['var_df'] = _exact_variance(batch_grad, search_dir)\n return info\n", "source": "the_stack_v2_python_sparse", "source_path": "cockpit/quantities/alpha.py", "source_repo": "MeNicefellow/cockpit", "split": "test", "star_events_count": 0} {"blob_id": "88f02e8874b0e6ab78bd7316645f517884066e40", "bodies": ["username = self.request.user.email\nold_password = form.cleaned_data['old_password']\ncheckCredentialsResult = bsd_api.account_checkCredentials(username, old_password)\nassert_valid_account(checkCredentialsResult)", "username = self.request.user.email\nnew_password = form.cleaned_data['new_password1']\nsetPasswordResult = bsd_api.account_setPassword(username, new_password)\n'\\n Should get 204 response on success_url\\n\\n https://cshift.cp.bsd.net/page/api/doc#-----------------set_password-------------\\n '\nassert setPasswordResult.http_status is 204", "try:\n self.check_old_password(form)\nexcept AssertionError:\n messages.error(self.request, '\\n There was an error validating your old password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n'Set new password'\ntry:\n self.set_new_password(form)\nexcept AssertionError:\n messages.error(self.request, '\\n There was an error setting your new password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\nreturn super(PasswordChangeView, self).form_valid(form)"], "bodies_text": "<|body_start_0|>\n username = self.request.user.email\n old_password = form.cleaned_data['old_password']\n checkCredentialsResult = bsd_api.account_checkCredentials(username, old_password)\n assert_valid_account(checkCredentialsResult)\n<|end_body_0|>\n\n<|body_start_1|>\n username = self.request.user.email\n new_password = form.cleaned_data['new_password1']\n setPasswordResult = bsd_api.account_setPassword(username, new_password)\n '\\n Should get 204 response on success_url\\n\\n https://cshift.cp.bsd.net/page/api/doc#-----------------set_password-------------\\n '\n assert setPasswordResult.http_status is 204\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.check_old_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error validating your old password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n 'Set new password'\n try:\n self.set_new_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error setting your new password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n return super(PasswordChangeView, self).form_valid(form)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "PasswordChangeView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PasswordChangeView:\n\n def check_old_password(self, form):\n \"\"\"Check if old password is valid in BSD\"\"\"\n <|body_0|>\n\n def set_new_password(self, form):\n \"\"\"Set new password in BSD\"\"\"\n <|body_1|>\n\n def form_valid(self, form):\n \"\"\"Check old password\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = self.request.user.email\n old_password = form.cleaned_data['old_password']\n checkCredentialsResult = bsd_api.account_checkCredentials(username, old_password)\n assert_valid_account(checkCredentialsResult)\n<|end_body_0|>\n\n<|body_start_1|>\n username = self.request.user.email\n new_password = form.cleaned_data['new_password1']\n setPasswordResult = bsd_api.account_setPassword(username, new_password)\n '\\n Should get 204 response on success_url\\n\\n https://cshift.cp.bsd.net/page/api/doc#-----------------set_password-------------\\n '\n assert setPasswordResult.http_status is 204\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.check_old_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error validating your old password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n 'Set new password'\n try:\n self.set_new_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error setting your new password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n return super(PasswordChangeView, self).form_valid(form)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000107", "length_bytes": 26076, "license_type": "permissive", "methods": [{"docstring": "Check if old password is valid in BSD", "name": "check_old_password", "signature": "def check_old_password(self, form)"}, {"docstring": "Set new password in BSD", "name": "set_new_password", "signature": "def set_new_password(self, form)"}, {"docstring": "Check old password", "name": "form_valid", "signature": "def form_valid(self, form)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_045830", "prompt": "Implement the Python class `PasswordChangeView` described below.\n\nClass description:\nImplement the PasswordChangeView class.\n\nMethod signatures and docstrings:\n- def check_old_password(self, form): Check if old password is valid in BSD\n- def set_new_password(self, form): Set new password in BSD\n- def form_valid(self, form): Check old password", "prompted_full_text": "Implement the Python class `PasswordChangeView` described below.\n\nClass description:\nImplement the PasswordChangeView class.\n\nMethod signatures and docstrings:\n- def check_old_password(self, form): Check if old password is valid in BSD\n- def set_new_password(self, form): Set new password in BSD\n- def form_valid(self, form): Check old password\n\n<|skeleton|>\nclass PasswordChangeView:\n\n def check_old_password(self, form):\n \"\"\"Check if old password is valid in BSD\"\"\"\n <|body_0|>\n\n def set_new_password(self, form):\n \"\"\"Set new password in BSD\"\"\"\n <|body_1|>\n\n def form_valid(self, form):\n \"\"\"Check old password\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = self.request.user.email\n old_password = form.cleaned_data['old_password']\n checkCredentialsResult = bsd_api.account_checkCredentials(username, old_password)\n assert_valid_account(checkCredentialsResult)\n<|end_body_0|>\n\n<|body_start_1|>\n username = self.request.user.email\n new_password = form.cleaned_data['new_password1']\n setPasswordResult = bsd_api.account_setPassword(username, new_password)\n '\\n Should get 204 response on success_url\\n\\n https://cshift.cp.bsd.net/page/api/doc#-----------------set_password-------------\\n '\n assert setPasswordResult.http_status is 204\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.check_old_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error validating your old password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n 'Set new password'\n try:\n self.set_new_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error setting your new password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n return super(PasswordChangeView, self).form_valid(form)\n<|end_body_2|>\n", "revision_id": "c8024b805ff5ff0e16f54dce7bf05097fd2f08e0", "skeleton": "<|skeleton|>\nclass PasswordChangeView:\n\n def check_old_password(self, form):\n \"\"\"Check if old password is valid in BSD\"\"\"\n <|body_0|>\n\n def set_new_password(self, form):\n \"\"\"Set new password in BSD\"\"\"\n <|body_1|>\n\n def form_valid(self, form):\n \"\"\"Check old password\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PasswordChangeView:\n def check_old_password(self, form):\n \"\"\"Check if old password is valid in BSD\"\"\"\n username = self.request.user.email\n old_password = form.cleaned_data['old_password']\n checkCredentialsResult = bsd_api.account_checkCredentials(username, old_password)\n assert_valid_account(checkCredentialsResult)\n\n def set_new_password(self, form):\n \"\"\"Set new password in BSD\"\"\"\n username = self.request.user.email\n new_password = form.cleaned_data['new_password1']\n setPasswordResult = bsd_api.account_setPassword(username, new_password)\n '\\n Should get 204 response on success_url\\n\\n https://cshift.cp.bsd.net/page/api/doc#-----------------set_password-------------\\n '\n assert setPasswordResult.http_status is 204\n\n def form_valid(self, form):\n \"\"\"Check old password\"\"\"\n try:\n self.check_old_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error validating your old password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n 'Set new password'\n try:\n self.set_new_password(form)\n except AssertionError:\n messages.error(self.request, '\\n There was an error setting your new password. Please make\\n sure all fields are filled with correct data and try again.\\n ')\n return redirect('organizing-hub-password-change')\n return super(PasswordChangeView, self).form_valid(form)\n", "source": "the_stack_v2_python_sparse", "source_path": "organizing_hub/views/views.py", "source_repo": "Our-Revolution/site", "split": "test", "star_events_count": 4} {"blob_id": "86a6b69d3dd4bfca16f00505a3460e4b143c4a46", "bodies": ["self.follower_followee = {}\nself.all_usefeed = {}\nself.time_post = []", "if userId in self.all_usefeed.keys():\n self.all_usefeed[userId].append(tweetId)\nelse:\n self.all_usefeed[userId] = [tweetId]\nself.time_post.append(tweetId)", "result = []\nlen_time_post = len(self.time_post)\nindex = len_time_post - 1\nall_f = []\nif userId in self.follower_followee.keys():\n for followee in self.follower_followee[userId]:\n if followee in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[followee])\nif userId in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[userId])\nwhile index > -1 and len(result) < 10:\n cur_post = self.time_post[index]\n if cur_post in all_f:\n result.append(cur_post)\n index -= 1\nreturn result", "if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n return\n else:\n self.follower_followee[followerId].append(followeeId)\nelse:\n self.follower_followee[followerId] = [followeeId]", "if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n self.follower_followee[followerId].remove(followeeId)"], "bodies_text": "<|body_start_0|>\n self.follower_followee = {}\n self.all_usefeed = {}\n self.time_post = []\n<|end_body_0|>\n\n<|body_start_1|>\n if userId in self.all_usefeed.keys():\n self.all_usefeed[userId].append(tweetId)\n else:\n self.all_usefeed[userId] = [tweetId]\n self.time_post.append(tweetId)\n<|end_body_1|>\n\n<|body_start_2|>\n result = []\n len_time_post = len(self.time_post)\n index = len_time_post - 1\n all_f = []\n if userId in self.follower_followee.keys():\n for followee in self.follower_followee[userId]:\n if followee in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[followee])\n if userId in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[userId])\n while index > -1 and len(result) < 10:\n cur_post = self.time_post[index]\n if cur_post in all_f:\n result.append(cur_post)\n index -= 1\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n return\n else:\n self.follower_followee[followerId].append(followeeId)\n else:\n self.follower_followee[followerId] = [followeeId]\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n self.follower_followee[followerId].remove(followeeId)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Twitter", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.follower_followee = {}\n self.all_usefeed = {}\n self.time_post = []\n<|end_body_0|>\n\n<|body_start_1|>\n if userId in self.all_usefeed.keys():\n self.all_usefeed[userId].append(tweetId)\n else:\n self.all_usefeed[userId] = [tweetId]\n self.time_post.append(tweetId)\n<|end_body_1|>\n\n<|body_start_2|>\n result = []\n len_time_post = len(self.time_post)\n index = len_time_post - 1\n all_f = []\n if userId in self.follower_followee.keys():\n for followee in self.follower_followee[userId]:\n if followee in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[followee])\n if userId in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[userId])\n while index > -1 and len(result) < 10:\n cur_post = self.time_post[index]\n if cur_post in all_f:\n result.append(cur_post)\n index -= 1\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n return\n else:\n self.follower_followee[followerId].append(followeeId)\n else:\n self.follower_followee[followerId] = [followeeId]\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n self.follower_followee[followerId].remove(followeeId)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000108", "length_bytes": 2858, "license_type": "permissive", "methods": [{"docstring": "Initialize your data structure here.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Compose a new tweet.", "name": "postTweet", "signature": "def postTweet(self, userId: int, tweetId: int) -> None"}, {"docstring": "Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.", "name": "getNewsFeed", "signature": "def getNewsFeed(self, userId: int) -> List[int]"}, {"docstring": "Follower follows a followee. If the operation is invalid, it should be a no-op.", "name": "follow", "signature": "def follow(self, followerId: int, followeeId: int) -> None"}, {"docstring": "Follower unfollows a followee. If the operation is invalid, it should be a no-op.", "name": "unfollow", "signature": "def unfollow(self, followerId: int, followeeId: int) -> None"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_test_002526", "prompt": "Implement the Python class `Twitter` described below.\n\nClass description:\nImplement the Twitter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def postTweet(self, userId: int, tweetId: int) -> None: Compose a new tweet.\n- def getNewsFeed(self, userId: int) -> List[int]: Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\n- def follow(self, followerId: int, followeeId: int) -> None: Follower follows a followee. If the operation is invalid, it should be a no-op.\n- def unfollow(self, followerId: int, followeeId: int) -> None: Follower unfollows a followee. If the operation is invalid, it should be a no-op.", "prompted_full_text": "Implement the Python class `Twitter` described below.\n\nClass description:\nImplement the Twitter class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def postTweet(self, userId: int, tweetId: int) -> None: Compose a new tweet.\n- def getNewsFeed(self, userId: int) -> List[int]: Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\n- def follow(self, followerId: int, followeeId: int) -> None: Follower follows a followee. If the operation is invalid, it should be a no-op.\n- def unfollow(self, followerId: int, followeeId: int) -> None: Follower unfollows a followee. If the operation is invalid, it should be a no-op.\n\n<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.follower_followee = {}\n self.all_usefeed = {}\n self.time_post = []\n<|end_body_0|>\n\n<|body_start_1|>\n if userId in self.all_usefeed.keys():\n self.all_usefeed[userId].append(tweetId)\n else:\n self.all_usefeed[userId] = [tweetId]\n self.time_post.append(tweetId)\n<|end_body_1|>\n\n<|body_start_2|>\n result = []\n len_time_post = len(self.time_post)\n index = len_time_post - 1\n all_f = []\n if userId in self.follower_followee.keys():\n for followee in self.follower_followee[userId]:\n if followee in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[followee])\n if userId in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[userId])\n while index > -1 and len(result) < 10:\n cur_post = self.time_post[index]\n if cur_post in all_f:\n result.append(cur_post)\n index -= 1\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n return\n else:\n self.follower_followee[followerId].append(followeeId)\n else:\n self.follower_followee[followerId] = [followeeId]\n<|end_body_3|>\n\n<|body_start_4|>\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n self.follower_followee[followerId].remove(followeeId)\n<|end_body_4|>\n", "revision_id": "55c6c3e7890b596b709b50cafa415b9594c03edd", "skeleton": "<|skeleton|>\nclass Twitter:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n <|body_1|>\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n <|body_2|>\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_3|>\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Twitter:\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n self.follower_followee = {}\n self.all_usefeed = {}\n self.time_post = []\n\n def postTweet(self, userId: int, tweetId: int) -> None:\n \"\"\"Compose a new tweet.\"\"\"\n if userId in self.all_usefeed.keys():\n self.all_usefeed[userId].append(tweetId)\n else:\n self.all_usefeed[userId] = [tweetId]\n self.time_post.append(tweetId)\n\n def getNewsFeed(self, userId: int) -> List[int]:\n \"\"\"Retrieve the 10 most recent tweet ids in the user's news feed. Each item in the news feed must be posted by users who the user followed or by the user herself. Tweets must be ordered from most recent to least recent.\"\"\"\n result = []\n len_time_post = len(self.time_post)\n index = len_time_post - 1\n all_f = []\n if userId in self.follower_followee.keys():\n for followee in self.follower_followee[userId]:\n if followee in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[followee])\n if userId in self.all_usefeed.keys():\n all_f.extend(self.all_usefeed[userId])\n while index > -1 and len(result) < 10:\n cur_post = self.time_post[index]\n if cur_post in all_f:\n result.append(cur_post)\n index -= 1\n return result\n\n def follow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower follows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n return\n else:\n self.follower_followee[followerId].append(followeeId)\n else:\n self.follower_followee[followerId] = [followeeId]\n\n def unfollow(self, followerId: int, followeeId: int) -> None:\n \"\"\"Follower unfollows a followee. If the operation is invalid, it should be a no-op.\"\"\"\n if followerId in self.follower_followee.keys():\n if followeeId in self.follower_followee[followerId]:\n self.follower_followee[followerId].remove(followeeId)\n", "source": "the_stack_v2_python_sparse", "source_path": "tencent/Twitter.py", "source_repo": "summer-vacation/AlgoExec", "split": "test", "star_events_count": 1} {"blob_id": "1d052e68851bfbee35867cf6718e181321af9c24", "bodies": ["stack = [root] if root else []\nall_nodes = {}\nwhile stack:\n new_stack = []\n for node in stack:\n for child in (node.left, node.right):\n if child:\n all_nodes[child] = node\n new_stack.append(child)\n if not new_stack:\n break\n stack = new_stack\nlevel = set(stack)\nwhile len(level) > 1:\n new_level = set((all_nodes[node] for node in level))\n level = new_level\nreturn level.pop()", "def dfs(root):\n if not root:\n return (0, None)\n l, r = (dfs(root.left), dfs(root.right))\n if l[0] > r[0]:\n return (l[0] + 1, l[1])\n elif l[0] < r[0]:\n return (r[0] + 1, r[1])\n else:\n return (l[0] + 1, root)\n return dfs(root)[1]"], "bodies_text": "<|body_start_0|>\n stack = [root] if root else []\n all_nodes = {}\n while stack:\n new_stack = []\n for node in stack:\n for child in (node.left, node.right):\n if child:\n all_nodes[child] = node\n new_stack.append(child)\n if not new_stack:\n break\n stack = new_stack\n level = set(stack)\n while len(level) > 1:\n new_level = set((all_nodes[node] for node in level))\n level = new_level\n return level.pop()\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(root):\n if not root:\n return (0, None)\n l, r = (dfs(root.left), dfs(root.right))\n if l[0] > r[0]:\n return (l[0] + 1, l[1])\n elif l[0] < r[0]:\n return (r[0] + 1, r[1])\n else:\n return (l[0] + 1, root)\n return dfs(root)[1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stack = [root] if root else []\n all_nodes = {}\n while stack:\n new_stack = []\n for node in stack:\n for child in (node.left, node.right):\n if child:\n all_nodes[child] = node\n new_stack.append(child)\n if not new_stack:\n break\n stack = new_stack\n level = set(stack)\n while len(level) > 1:\n new_level = set((all_nodes[node] for node in level))\n level = new_level\n return level.pop()\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(root):\n if not root:\n return (0, None)\n l, r = (dfs(root.left), dfs(root.right))\n if l[0] > r[0]:\n return (l[0] + 1, l[1])\n elif l[0] < r[0]:\n return (r[0] + 1, r[1])\n else:\n return (l[0] + 1, root)\n return dfs(root)[1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000109", "length_bytes": 1261, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: TreeNode", "name": "subtreeWithAllDeepest", "signature": "def subtreeWithAllDeepest(self, root)"}, {"docstring": ":type root: TreeNode :rtype: TreeNode", "name": "subtreeWithAllDeepest", "signature": "def subtreeWithAllDeepest(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_026158", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def subtreeWithAllDeepest(self, root): :type root: TreeNode :rtype: TreeNode\n- def subtreeWithAllDeepest(self, root): :type root: TreeNode :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def subtreeWithAllDeepest(self, root): :type root: TreeNode :rtype: TreeNode\n- def subtreeWithAllDeepest(self, root): :type root: TreeNode :rtype: TreeNode\n\n<|skeleton|>\nclass Solution:\n\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n stack = [root] if root else []\n all_nodes = {}\n while stack:\n new_stack = []\n for node in stack:\n for child in (node.left, node.right):\n if child:\n all_nodes[child] = node\n new_stack.append(child)\n if not new_stack:\n break\n stack = new_stack\n level = set(stack)\n while len(level) > 1:\n new_level = set((all_nodes[node] for node in level))\n level = new_level\n return level.pop()\n<|end_body_0|>\n\n<|body_start_1|>\n def dfs(root):\n if not root:\n return (0, None)\n l, r = (dfs(root.left), dfs(root.right))\n if l[0] > r[0]:\n return (l[0] + 1, l[1])\n elif l[0] < r[0]:\n return (r[0] + 1, r[1])\n else:\n return (l[0] + 1, root)\n return dfs(root)[1]\n<|end_body_1|>\n", "revision_id": "16e4343922041929bc3021e152093425066620bb", "skeleton": "<|skeleton|>\nclass Solution:\n\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n stack = [root] if root else []\n all_nodes = {}\n while stack:\n new_stack = []\n for node in stack:\n for child in (node.left, node.right):\n if child:\n all_nodes[child] = node\n new_stack.append(child)\n if not new_stack:\n break\n stack = new_stack\n level = set(stack)\n while len(level) > 1:\n new_level = set((all_nodes[node] for node in level))\n level = new_level\n return level.pop()\n\n def subtreeWithAllDeepest(self, root):\n \"\"\":type root: TreeNode :rtype: TreeNode\"\"\"\n def dfs(root):\n if not root:\n return (0, None)\n l, r = (dfs(root.left), dfs(root.right))\n if l[0] > r[0]:\n return (l[0] + 1, l[1])\n elif l[0] < r[0]:\n return (r[0] + 1, r[1])\n else:\n return (l[0] + 1, root)\n return dfs(root)[1]\n", "source": "the_stack_v2_python_sparse", "source_path": "865_subtreeWithAllDeepest.py", "source_repo": "zzz686970/leetcode-2018", "split": "test", "star_events_count": 3} {"blob_id": "8d8c10531fad5f016f77fbd84fee07e3127c644b", "bodies": ["params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\nparams.update(kwargs)\nreturn self.api._get('emails/get_or_create', params=params)", "params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\nparams.update(kwargs)\nreturn self.api._get('emails/disable', params=params)"], "bodies_text": "<|body_start_0|>\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/get_or_create', params=params)\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/disable', params=params)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "EmailsManager", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EmailsManager:\n\n def get_or_create(self, obj_type, obj_id, **kwargs):\n \"\"\"Get or create email to an object.\"\"\"\n <|body_0|>\n\n def disable(self, obj_type, obj_id, **kwargs):\n \"\"\"Disable email to an object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/get_or_create', params=params)\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/disable', params=params)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000110", "length_bytes": 668, "license_type": "permissive", "methods": [{"docstring": "Get or create email to an object.", "name": "get_or_create", "signature": "def get_or_create(self, obj_type, obj_id, **kwargs)"}, {"docstring": "Disable email to an object.", "name": "disable", "signature": "def disable(self, obj_type, obj_id, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029154", "prompt": "Implement the Python class `EmailsManager` described below.\n\nClass description:\nImplement the EmailsManager class.\n\nMethod signatures and docstrings:\n- def get_or_create(self, obj_type, obj_id, **kwargs): Get or create email to an object.\n- def disable(self, obj_type, obj_id, **kwargs): Disable email to an object.", "prompted_full_text": "Implement the Python class `EmailsManager` described below.\n\nClass description:\nImplement the EmailsManager class.\n\nMethod signatures and docstrings:\n- def get_or_create(self, obj_type, obj_id, **kwargs): Get or create email to an object.\n- def disable(self, obj_type, obj_id, **kwargs): Disable email to an object.\n\n<|skeleton|>\nclass EmailsManager:\n\n def get_or_create(self, obj_type, obj_id, **kwargs):\n \"\"\"Get or create email to an object.\"\"\"\n <|body_0|>\n\n def disable(self, obj_type, obj_id, **kwargs):\n \"\"\"Disable email to an object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/get_or_create', params=params)\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/disable', params=params)\n<|end_body_1|>\n", "revision_id": "7b85de81619146d3d54fececda068010ae73775b", "skeleton": "<|skeleton|>\nclass EmailsManager:\n\n def get_or_create(self, obj_type, obj_id, **kwargs):\n \"\"\"Get or create email to an object.\"\"\"\n <|body_0|>\n\n def disable(self, obj_type, obj_id, **kwargs):\n \"\"\"Disable email to an object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EmailsManager:\n def get_or_create(self, obj_type, obj_id, **kwargs):\n \"\"\"Get or create email to an object.\"\"\"\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/get_or_create', params=params)\n\n def disable(self, obj_type, obj_id, **kwargs):\n \"\"\"Disable email to an object.\"\"\"\n params = {'token': self.token, 'obj_type': obj_type, 'obj_id': obj_id}\n params.update(kwargs)\n return self.api._get('emails/disable', params=params)\n", "source": "the_stack_v2_python_sparse", "source_path": "todoist/managers/emails.py", "source_repo": "Doist/todoist-python", "split": "test", "star_events_count": 627} {"blob_id": "7905a815d83e40adb321a45350231cfac63c63e0", "bodies": ["parser.add_argument('id', help='The ID of the read group set to be updated.')\nparser.add_argument('--name', help='The new name of the readgroupset.')\nparser.add_argument('--reference-set-id', help='The new reference set ID of the readgroupset.')", "apitools_client = genomics_util.GetGenomicsClient()\ngenomics_messages = genomics_util.GetGenomicsMessages()\nif not (args.name or args.reference_set_id):\n raise GenomicsError('Must specify --name and/or --reference-set-id')\nupdated = genomics_messages.ReadGroupSet()\nmask = []\nif args.name:\n updated.name = args.name\n mask.append('name')\nif args.reference_set_id:\n updated.referenceSetId = args.reference_set_id\n mask.append('referenceSetId')\nrequest = genomics_messages.GenomicsReadgroupsetsPatchRequest(readGroupSet=updated, readGroupSetId=str(args.id), updateMask=','.join(mask))\nresult = apitools_client.readgroupsets.Patch(request)\nname = str(result.id)\nif result.name:\n name += ', name: {0}'.format(result.name)\nif result.referenceSetId:\n name += ', referenceSetId: {0}'.format(result.referenceSetId)\nlog.UpdatedResource(name, kind='readgroupset')\nreturn result"], "bodies_text": "<|body_start_0|>\n parser.add_argument('id', help='The ID of the read group set to be updated.')\n parser.add_argument('--name', help='The new name of the readgroupset.')\n parser.add_argument('--reference-set-id', help='The new reference set ID of the readgroupset.')\n<|end_body_0|>\n\n<|body_start_1|>\n apitools_client = genomics_util.GetGenomicsClient()\n genomics_messages = genomics_util.GetGenomicsMessages()\n if not (args.name or args.reference_set_id):\n raise GenomicsError('Must specify --name and/or --reference-set-id')\n updated = genomics_messages.ReadGroupSet()\n mask = []\n if args.name:\n updated.name = args.name\n mask.append('name')\n if args.reference_set_id:\n updated.referenceSetId = args.reference_set_id\n mask.append('referenceSetId')\n request = genomics_messages.GenomicsReadgroupsetsPatchRequest(readGroupSet=updated, readGroupSetId=str(args.id), updateMask=','.join(mask))\n result = apitools_client.readgroupsets.Patch(request)\n name = str(result.id)\n if result.name:\n name += ', name: {0}'.format(result.name)\n if result.referenceSetId:\n name += ', referenceSetId: {0}'.format(result.referenceSetId)\n log.UpdatedResource(name, kind='readgroupset')\n return result\n<|end_body_1|>\n", "class_docstring": "Updates a readgroupset name and/or referenceSetId.", "class_name": "Update", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Update:\n \"\"\"Updates a readgroupset name and/or referenceSetId.\"\"\"\n\n def Args(parser):\n \"\"\"Register flags for this command.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace, All the arguments that were provided to this command invocation. Raises: HttpException: An http error response was received while executing api request. Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parser.add_argument('id', help='The ID of the read group set to be updated.')\n parser.add_argument('--name', help='The new name of the readgroupset.')\n parser.add_argument('--reference-set-id', help='The new reference set ID of the readgroupset.')\n<|end_body_0|>\n\n<|body_start_1|>\n apitools_client = genomics_util.GetGenomicsClient()\n genomics_messages = genomics_util.GetGenomicsMessages()\n if not (args.name or args.reference_set_id):\n raise GenomicsError('Must specify --name and/or --reference-set-id')\n updated = genomics_messages.ReadGroupSet()\n mask = []\n if args.name:\n updated.name = args.name\n mask.append('name')\n if args.reference_set_id:\n updated.referenceSetId = args.reference_set_id\n mask.append('referenceSetId')\n request = genomics_messages.GenomicsReadgroupsetsPatchRequest(readGroupSet=updated, readGroupSetId=str(args.id), updateMask=','.join(mask))\n result = apitools_client.readgroupsets.Patch(request)\n name = str(result.id)\n if result.name:\n name += ', name: {0}'.format(result.name)\n if result.referenceSetId:\n name += ', referenceSetId: {0}'.format(result.referenceSetId)\n log.UpdatedResource(name, kind='readgroupset')\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000111", "length_bytes": 2773, "license_type": "permissive", "methods": [{"docstring": "Register flags for this command.", "name": "Args", "signature": "def Args(parser)"}, {"docstring": "This is what gets called when the user runs this command. Args: args: an argparse namespace, All the arguments that were provided to this command invocation. Raises: HttpException: An http error response was received while executing api request. Returns: None", "name": "Run", "signature": "def Run(self, args)"}], "n_methods": 2, "prompt": "Implement the Python class `Update` described below.\n\nClass description:\nUpdates a readgroupset name and/or referenceSetId.\n\nMethod signatures and docstrings:\n- def Args(parser): Register flags for this command.\n- def Run(self, args): This is what gets called when the user runs this command. Args: args: an argparse namespace, All the arguments that were provided to this command invocation. Raises: HttpException: An http error response was received while executing api request. Returns: None", "prompted_full_text": "Implement the Python class `Update` described below.\n\nClass description:\nUpdates a readgroupset name and/or referenceSetId.\n\nMethod signatures and docstrings:\n- def Args(parser): Register flags for this command.\n- def Run(self, args): This is what gets called when the user runs this command. Args: args: an argparse namespace, All the arguments that were provided to this command invocation. Raises: HttpException: An http error response was received while executing api request. Returns: None\n\n<|skeleton|>\nclass Update:\n \"\"\"Updates a readgroupset name and/or referenceSetId.\"\"\"\n\n def Args(parser):\n \"\"\"Register flags for this command.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace, All the arguments that were provided to this command invocation. Raises: HttpException: An http error response was received while executing api request. Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n parser.add_argument('id', help='The ID of the read group set to be updated.')\n parser.add_argument('--name', help='The new name of the readgroupset.')\n parser.add_argument('--reference-set-id', help='The new reference set ID of the readgroupset.')\n<|end_body_0|>\n\n<|body_start_1|>\n apitools_client = genomics_util.GetGenomicsClient()\n genomics_messages = genomics_util.GetGenomicsMessages()\n if not (args.name or args.reference_set_id):\n raise GenomicsError('Must specify --name and/or --reference-set-id')\n updated = genomics_messages.ReadGroupSet()\n mask = []\n if args.name:\n updated.name = args.name\n mask.append('name')\n if args.reference_set_id:\n updated.referenceSetId = args.reference_set_id\n mask.append('referenceSetId')\n request = genomics_messages.GenomicsReadgroupsetsPatchRequest(readGroupSet=updated, readGroupSetId=str(args.id), updateMask=','.join(mask))\n result = apitools_client.readgroupsets.Patch(request)\n name = str(result.id)\n if result.name:\n name += ', name: {0}'.format(result.name)\n if result.referenceSetId:\n name += ', referenceSetId: {0}'.format(result.referenceSetId)\n log.UpdatedResource(name, kind='readgroupset')\n return result\n<|end_body_1|>\n", "revision_id": "c98b58aeb0994e011df960163541e9379ae7ea06", "skeleton": "<|skeleton|>\nclass Update:\n \"\"\"Updates a readgroupset name and/or referenceSetId.\"\"\"\n\n def Args(parser):\n \"\"\"Register flags for this command.\"\"\"\n <|body_0|>\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace, All the arguments that were provided to this command invocation. Raises: HttpException: An http error response was received while executing api request. Returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Update:\n \"\"\"Updates a readgroupset name and/or referenceSetId.\"\"\"\n\n def Args(parser):\n \"\"\"Register flags for this command.\"\"\"\n parser.add_argument('id', help='The ID of the read group set to be updated.')\n parser.add_argument('--name', help='The new name of the readgroupset.')\n parser.add_argument('--reference-set-id', help='The new reference set ID of the readgroupset.')\n\n def Run(self, args):\n \"\"\"This is what gets called when the user runs this command. Args: args: an argparse namespace, All the arguments that were provided to this command invocation. Raises: HttpException: An http error response was received while executing api request. Returns: None\"\"\"\n apitools_client = genomics_util.GetGenomicsClient()\n genomics_messages = genomics_util.GetGenomicsMessages()\n if not (args.name or args.reference_set_id):\n raise GenomicsError('Must specify --name and/or --reference-set-id')\n updated = genomics_messages.ReadGroupSet()\n mask = []\n if args.name:\n updated.name = args.name\n mask.append('name')\n if args.reference_set_id:\n updated.referenceSetId = args.reference_set_id\n mask.append('referenceSetId')\n request = genomics_messages.GenomicsReadgroupsetsPatchRequest(readGroupSet=updated, readGroupSetId=str(args.id), updateMask=','.join(mask))\n result = apitools_client.readgroupsets.Patch(request)\n name = str(result.id)\n if result.name:\n name += ', name: {0}'.format(result.name)\n if result.referenceSetId:\n name += ', referenceSetId: {0}'.format(result.referenceSetId)\n log.UpdatedResource(name, kind='readgroupset')\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "google-cloud-sdk/lib/surface/genomics/readgroupsets/update.py", "source_repo": "KaranToor/MA450", "split": "test", "star_events_count": 1} {"blob_id": "54441ebaa1b7b7caf498f6fd48f9b6e911cc9963", "bodies": ["self.cmd_fds = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\nself.stdout = buffered_non_blocking_reader(self.cmd_fds.stdout)\npickle.dump(parameter, self.cmd_fds.stdin)", "data = self.stdout.readline(timeout).rstrip()\nif data != 'ok':\n return False\nreturn True", "try:\n results = []\n while self.stdout.has_data():\n results.append(self.stdout.readline(timeout))\n if results[0] == '':\n raise Exception('no data collected in timeout period')\n converted = pickle.loads('\\n'.join(results))\nexcept Exception as e:\n stderr = non_blocking_read(self.cmd_fds.stderr)\n if stderr:\n raise Exception('something bad happened in subprocess: %s' % stderr)\n else:\n raise\nreturn converted"], "bodies_text": "<|body_start_0|>\n self.cmd_fds = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n self.stdout = buffered_non_blocking_reader(self.cmd_fds.stdout)\n pickle.dump(parameter, self.cmd_fds.stdin)\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.stdout.readline(timeout).rstrip()\n if data != 'ok':\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n results = []\n while self.stdout.has_data():\n results.append(self.stdout.readline(timeout))\n if results[0] == '':\n raise Exception('no data collected in timeout period')\n converted = pickle.loads('\\n'.join(results))\n except Exception as e:\n stderr = non_blocking_read(self.cmd_fds.stderr)\n if stderr:\n raise Exception('something bad happened in subprocess: %s' % stderr)\n else:\n raise\n return converted\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ParentProcess", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParentProcess:\n\n def __init__(self, command, parameter):\n \"\"\"command is formated for Popen; [\"cmd\", \"param\"] parameter is one variable, but can be complex (array, dictionary, etc)\"\"\"\n <|body_0|>\n\n def is_ok(self, timeout=0.1):\n \"\"\"returns true if ok; 1 to 1 correspondence with ChildProcess.send_ok - other code doesn't automatically chomp any extra OKs\"\"\"\n <|body_1|>\n\n def get_results(self, timeout=5):\n \"\"\"returns the final results, or raises an exception if there are none\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cmd_fds = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n self.stdout = buffered_non_blocking_reader(self.cmd_fds.stdout)\n pickle.dump(parameter, self.cmd_fds.stdin)\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.stdout.readline(timeout).rstrip()\n if data != 'ok':\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n results = []\n while self.stdout.has_data():\n results.append(self.stdout.readline(timeout))\n if results[0] == '':\n raise Exception('no data collected in timeout period')\n converted = pickle.loads('\\n'.join(results))\n except Exception as e:\n stderr = non_blocking_read(self.cmd_fds.stderr)\n if stderr:\n raise Exception('something bad happened in subprocess: %s' % stderr)\n else:\n raise\n return converted\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000112", "length_bytes": 4627, "license_type": "no_license", "methods": [{"docstring": "command is formated for Popen; [\"cmd\", \"param\"] parameter is one variable, but can be complex (array, dictionary, etc)", "name": "__init__", "signature": "def __init__(self, command, parameter)"}, {"docstring": "returns true if ok; 1 to 1 correspondence with ChildProcess.send_ok - other code doesn't automatically chomp any extra OKs", "name": "is_ok", "signature": "def is_ok(self, timeout=0.1)"}, {"docstring": "returns the final results, or raises an exception if there are none", "name": "get_results", "signature": "def get_results(self, timeout=5)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005075", "prompt": "Implement the Python class `ParentProcess` described below.\n\nClass description:\nImplement the ParentProcess class.\n\nMethod signatures and docstrings:\n- def __init__(self, command, parameter): command is formated for Popen; [\"cmd\", \"param\"] parameter is one variable, but can be complex (array, dictionary, etc)\n- def is_ok(self, timeout=0.1): returns true if ok; 1 to 1 correspondence with ChildProcess.send_ok - other code doesn't automatically chomp any extra OKs\n- def get_results(self, timeout=5): returns the final results, or raises an exception if there are none", "prompted_full_text": "Implement the Python class `ParentProcess` described below.\n\nClass description:\nImplement the ParentProcess class.\n\nMethod signatures and docstrings:\n- def __init__(self, command, parameter): command is formated for Popen; [\"cmd\", \"param\"] parameter is one variable, but can be complex (array, dictionary, etc)\n- def is_ok(self, timeout=0.1): returns true if ok; 1 to 1 correspondence with ChildProcess.send_ok - other code doesn't automatically chomp any extra OKs\n- def get_results(self, timeout=5): returns the final results, or raises an exception if there are none\n\n<|skeleton|>\nclass ParentProcess:\n\n def __init__(self, command, parameter):\n \"\"\"command is formated for Popen; [\"cmd\", \"param\"] parameter is one variable, but can be complex (array, dictionary, etc)\"\"\"\n <|body_0|>\n\n def is_ok(self, timeout=0.1):\n \"\"\"returns true if ok; 1 to 1 correspondence with ChildProcess.send_ok - other code doesn't automatically chomp any extra OKs\"\"\"\n <|body_1|>\n\n def get_results(self, timeout=5):\n \"\"\"returns the final results, or raises an exception if there are none\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cmd_fds = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n self.stdout = buffered_non_blocking_reader(self.cmd_fds.stdout)\n pickle.dump(parameter, self.cmd_fds.stdin)\n<|end_body_0|>\n\n<|body_start_1|>\n data = self.stdout.readline(timeout).rstrip()\n if data != 'ok':\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n results = []\n while self.stdout.has_data():\n results.append(self.stdout.readline(timeout))\n if results[0] == '':\n raise Exception('no data collected in timeout period')\n converted = pickle.loads('\\n'.join(results))\n except Exception as e:\n stderr = non_blocking_read(self.cmd_fds.stderr)\n if stderr:\n raise Exception('something bad happened in subprocess: %s' % stderr)\n else:\n raise\n return converted\n<|end_body_2|>\n", "revision_id": "4822b4e9c9d165e4be151948d3c8a800fe1b814e", "skeleton": "<|skeleton|>\nclass ParentProcess:\n\n def __init__(self, command, parameter):\n \"\"\"command is formated for Popen; [\"cmd\", \"param\"] parameter is one variable, but can be complex (array, dictionary, etc)\"\"\"\n <|body_0|>\n\n def is_ok(self, timeout=0.1):\n \"\"\"returns true if ok; 1 to 1 correspondence with ChildProcess.send_ok - other code doesn't automatically chomp any extra OKs\"\"\"\n <|body_1|>\n\n def get_results(self, timeout=5):\n \"\"\"returns the final results, or raises an exception if there are none\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ParentProcess:\n def __init__(self, command, parameter):\n \"\"\"command is formated for Popen; [\"cmd\", \"param\"] parameter is one variable, but can be complex (array, dictionary, etc)\"\"\"\n self.cmd_fds = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)\n self.stdout = buffered_non_blocking_reader(self.cmd_fds.stdout)\n pickle.dump(parameter, self.cmd_fds.stdin)\n\n def is_ok(self, timeout=0.1):\n \"\"\"returns true if ok; 1 to 1 correspondence with ChildProcess.send_ok - other code doesn't automatically chomp any extra OKs\"\"\"\n data = self.stdout.readline(timeout).rstrip()\n if data != 'ok':\n return False\n return True\n\n def get_results(self, timeout=5):\n \"\"\"returns the final results, or raises an exception if there are none\"\"\"\n try:\n results = []\n while self.stdout.has_data():\n results.append(self.stdout.readline(timeout))\n if results[0] == '':\n raise Exception('no data collected in timeout period')\n converted = pickle.loads('\\n'.join(results))\n except Exception as e:\n stderr = non_blocking_read(self.cmd_fds.stderr)\n if stderr:\n raise Exception('something bad happened in subprocess: %s' % stderr)\n else:\n raise\n return converted\n", "source": "the_stack_v2_python_sparse", "source_path": "socialscan/independentsubprocess/independentsubprocess.py", "source_repo": "F3DS/f3ds", "split": "test", "star_events_count": 1} {"blob_id": "bc2dfa0aa1f1aa977745b410566de6a4f4166f92", "bodies": ["super().__init__()\nself.dropout = tf.keras.layers.Dropout(dropout_rate)\nself.attention_heads1 = [GraphAttentionLayer(node_feature_dim, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\nself.attention_heads2 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\nself.attention_heads3 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\nself.i_layer = tf.keras.layers.Dense(readout_layer_size, activation='sigmoid', kernel_regularizer=kernel_regularizer)\nself.j_layer = tf.keras.layers.Dense(readout_layer_size, kernel_regularizer=kernel_regularizer)\nself.classifier = tf.keras.layers.Dense(num_classes, kernel_regularizer=kernel_regularizer)\nself.softmax = tf.keras.layers.Softmax()", "attention_layers = [self.attention_heads1, self.attention_heads2]\nnodes_under_iter = nodes\nfor attention_heads in attention_layers:\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n nodes_under_iter = tf.concat([a_head(nodes_under_iter, adj) for a_head in attention_heads], axis=2)\n nodes_under_iter = tf.nn.elu(nodes_under_iter)\nnodes_under_iter = self.dropout(nodes_under_iter, training=training)\nif len(self.attention_heads3) > 1:\n nodes_under_iter = tf.keras.layers.Average()([a_head(nodes_under_iter, adj) for a_head in self.attention_heads3])\nelse:\n nodes_under_iter = self.attention_heads3[0](nodes_under_iter, adj)\nreadout = tf.reduce_sum(tf.multiply(self.i_layer(tf.keras.layers.Concatenate()([nodes_under_iter, nodes])), self.j_layer(nodes_under_iter)), axis=1)\nreturn readout", "nodes, edges = (inputs['atoms'], inputs['pairs'])\nadjacency_matrix = tf.cast(get_adjacency_matrix(edges), tf.int32)\nreadout = self.graph_representation(nodes, adjacency_matrix, training)\nlogits = self.classifier(readout, training=training)\nreturn self.softmax(logits)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.dropout = tf.keras.layers.Dropout(dropout_rate)\n self.attention_heads1 = [GraphAttentionLayer(node_feature_dim, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads2 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads3 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.i_layer = tf.keras.layers.Dense(readout_layer_size, activation='sigmoid', kernel_regularizer=kernel_regularizer)\n self.j_layer = tf.keras.layers.Dense(readout_layer_size, kernel_regularizer=kernel_regularizer)\n self.classifier = tf.keras.layers.Dense(num_classes, kernel_regularizer=kernel_regularizer)\n self.softmax = tf.keras.layers.Softmax()\n<|end_body_0|>\n\n<|body_start_1|>\n attention_layers = [self.attention_heads1, self.attention_heads2]\n nodes_under_iter = nodes\n for attention_heads in attention_layers:\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n nodes_under_iter = tf.concat([a_head(nodes_under_iter, adj) for a_head in attention_heads], axis=2)\n nodes_under_iter = tf.nn.elu(nodes_under_iter)\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n if len(self.attention_heads3) > 1:\n nodes_under_iter = tf.keras.layers.Average()([a_head(nodes_under_iter, adj) for a_head in self.attention_heads3])\n else:\n nodes_under_iter = self.attention_heads3[0](nodes_under_iter, adj)\n readout = tf.reduce_sum(tf.multiply(self.i_layer(tf.keras.layers.Concatenate()([nodes_under_iter, nodes])), self.j_layer(nodes_under_iter)), axis=1)\n return readout\n<|end_body_1|>\n\n<|body_start_2|>\n nodes, edges = (inputs['atoms'], inputs['pairs'])\n adjacency_matrix = tf.cast(get_adjacency_matrix(edges), tf.int32)\n readout = self.graph_representation(nodes, adjacency_matrix, training)\n logits = self.classifier(readout, training=training)\n return self.softmax(logits)\n<|end_body_2|>\n", "class_docstring": "A model that implements molecule classification via graph attention. #### References [1]: Petar Veličković, et. al. Graph Attention Networks. ICLR 2018. https://arxiv.org/abs/1710.10903", "class_name": "GATModel", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GATModel:\n \"\"\"A model that implements molecule classification via graph attention. #### References [1]: Petar Veličković, et. al. Graph Attention Networks. ICLR 2018. https://arxiv.org/abs/1710.10903\"\"\"\n\n def __init__(self, attention_heads, node_feature_dim, out_node_feature_dim, readout_layer_size, num_classes, constant_attention=False, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, dropout_rate=0.1):\n \"\"\"Construct a graph attention model. Args: attention_heads: number (integer) of attention heads. node_feature_dim: dimension (integer) of incoming node level features. out_node_feature_dim: dimension (integer) of node level features outcoming from the attention layer. readout_layer_size: dimension (integer) of graph level features after readout layer. num_classes: number (integer) of classes for classification. constant_attention: a boolean. If True, we directly use equal attention coefficients across neighbors without going through the network. Default is False. kernel_regularizer: Regularization function for Dense layer. dropout_rate: a float regulating percent of features to turn OFF.\"\"\"\n <|body_0|>\n\n def graph_representation(self, nodes, adj, training=False):\n \"\"\"Forward pass to compute molecular graph level representation. Args: nodes: An incoming tensor contains node level features and it should have dimension of (batch_size, num_nodes, node_feature_dim). adj: An incoming tensor contains adjacency matrices. Each adjacency matrix has added diagonal ones before entering the layer. It should have dimension of (batch_size, num_nodes, num_nodes). training: A boolean indicating if the model is in training mode or not. This affects the behavior of dropout layers. Returns: x_g: The graph level features tensor. It is aggregated from neighbours with attention and it has dimension of (batch_size, out_node_feature_dim).\"\"\"\n <|body_1|>\n\n def call(self, inputs, training=False):\n \"\"\"Forward pass computation of the model. Args: inputs: An inputs dictionary fed to the model. training: A boolean indicating if the model is in training mode or not. Returns: output: Logits tensor with dimension of (batch_size, classes) for classification.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.dropout = tf.keras.layers.Dropout(dropout_rate)\n self.attention_heads1 = [GraphAttentionLayer(node_feature_dim, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads2 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads3 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.i_layer = tf.keras.layers.Dense(readout_layer_size, activation='sigmoid', kernel_regularizer=kernel_regularizer)\n self.j_layer = tf.keras.layers.Dense(readout_layer_size, kernel_regularizer=kernel_regularizer)\n self.classifier = tf.keras.layers.Dense(num_classes, kernel_regularizer=kernel_regularizer)\n self.softmax = tf.keras.layers.Softmax()\n<|end_body_0|>\n\n<|body_start_1|>\n attention_layers = [self.attention_heads1, self.attention_heads2]\n nodes_under_iter = nodes\n for attention_heads in attention_layers:\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n nodes_under_iter = tf.concat([a_head(nodes_under_iter, adj) for a_head in attention_heads], axis=2)\n nodes_under_iter = tf.nn.elu(nodes_under_iter)\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n if len(self.attention_heads3) > 1:\n nodes_under_iter = tf.keras.layers.Average()([a_head(nodes_under_iter, adj) for a_head in self.attention_heads3])\n else:\n nodes_under_iter = self.attention_heads3[0](nodes_under_iter, adj)\n readout = tf.reduce_sum(tf.multiply(self.i_layer(tf.keras.layers.Concatenate()([nodes_under_iter, nodes])), self.j_layer(nodes_under_iter)), axis=1)\n return readout\n<|end_body_1|>\n\n<|body_start_2|>\n nodes, edges = (inputs['atoms'], inputs['pairs'])\n adjacency_matrix = tf.cast(get_adjacency_matrix(edges), tf.int32)\n readout = self.graph_representation(nodes, adjacency_matrix, training)\n logits = self.classifier(readout, training=training)\n return self.softmax(logits)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000113", "length_bytes": 14737, "license_type": "permissive", "methods": [{"docstring": "Construct a graph attention model. Args: attention_heads: number (integer) of attention heads. node_feature_dim: dimension (integer) of incoming node level features. out_node_feature_dim: dimension (integer) of node level features outcoming from the attention layer. readout_layer_size: dimension (integer) of graph level features after readout layer. num_classes: number (integer) of classes for classification. constant_attention: a boolean. If True, we directly use equal attention coefficients across neighbors without going through the network. Default is False. kernel_regularizer: Regularization function for Dense layer. dropout_rate: a float regulating percent of features to turn OFF.", "name": "__init__", "signature": "def __init__(self, attention_heads, node_feature_dim, out_node_feature_dim, readout_layer_size, num_classes, constant_attention=False, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, dropout_rate=0.1)"}, {"docstring": "Forward pass to compute molecular graph level representation. Args: nodes: An incoming tensor contains node level features and it should have dimension of (batch_size, num_nodes, node_feature_dim). adj: An incoming tensor contains adjacency matrices. Each adjacency matrix has added diagonal ones before entering the layer. It should have dimension of (batch_size, num_nodes, num_nodes). training: A boolean indicating if the model is in training mode or not. This affects the behavior of dropout layers. Returns: x_g: The graph level features tensor. It is aggregated from neighbours with attention and it has dimension of (batch_size, out_node_feature_dim).", "name": "graph_representation", "signature": "def graph_representation(self, nodes, adj, training=False)"}, {"docstring": "Forward pass computation of the model. Args: inputs: An inputs dictionary fed to the model. training: A boolean indicating if the model is in training mode or not. Returns: output: Logits tensor with dimension of (batch_size, classes) for classification.", "name": "call", "signature": "def call(self, inputs, training=False)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_050140", "prompt": "Implement the Python class `GATModel` described below.\n\nClass description:\nA model that implements molecule classification via graph attention. #### References [1]: Petar Veličković, et. al. Graph Attention Networks. ICLR 2018. https://arxiv.org/abs/1710.10903\n\nMethod signatures and docstrings:\n- def __init__(self, attention_heads, node_feature_dim, out_node_feature_dim, readout_layer_size, num_classes, constant_attention=False, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, dropout_rate=0.1): Construct a graph attention model. Args: attention_heads: number (integer) of attention heads. node_feature_dim: dimension (integer) of incoming node level features. out_node_feature_dim: dimension (integer) of node level features outcoming from the attention layer. readout_layer_size: dimension (integer) of graph level features after readout layer. num_classes: number (integer) of classes for classification. constant_attention: a boolean. If True, we directly use equal attention coefficients across neighbors without going through the network. Default is False. kernel_regularizer: Regularization function for Dense layer. dropout_rate: a float regulating percent of features to turn OFF.\n- def graph_representation(self, nodes, adj, training=False): Forward pass to compute molecular graph level representation. Args: nodes: An incoming tensor contains node level features and it should have dimension of (batch_size, num_nodes, node_feature_dim). adj: An incoming tensor contains adjacency matrices. Each adjacency matrix has added diagonal ones before entering the layer. It should have dimension of (batch_size, num_nodes, num_nodes). training: A boolean indicating if the model is in training mode or not. This affects the behavior of dropout layers. Returns: x_g: The graph level features tensor. It is aggregated from neighbours with attention and it has dimension of (batch_size, out_node_feature_dim).\n- def call(self, inputs, training=False): Forward pass computation of the model. Args: inputs: An inputs dictionary fed to the model. training: A boolean indicating if the model is in training mode or not. Returns: output: Logits tensor with dimension of (batch_size, classes) for classification.", "prompted_full_text": "Implement the Python class `GATModel` described below.\n\nClass description:\nA model that implements molecule classification via graph attention. #### References [1]: Petar Veličković, et. al. Graph Attention Networks. ICLR 2018. https://arxiv.org/abs/1710.10903\n\nMethod signatures and docstrings:\n- def __init__(self, attention_heads, node_feature_dim, out_node_feature_dim, readout_layer_size, num_classes, constant_attention=False, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, dropout_rate=0.1): Construct a graph attention model. Args: attention_heads: number (integer) of attention heads. node_feature_dim: dimension (integer) of incoming node level features. out_node_feature_dim: dimension (integer) of node level features outcoming from the attention layer. readout_layer_size: dimension (integer) of graph level features after readout layer. num_classes: number (integer) of classes for classification. constant_attention: a boolean. If True, we directly use equal attention coefficients across neighbors without going through the network. Default is False. kernel_regularizer: Regularization function for Dense layer. dropout_rate: a float regulating percent of features to turn OFF.\n- def graph_representation(self, nodes, adj, training=False): Forward pass to compute molecular graph level representation. Args: nodes: An incoming tensor contains node level features and it should have dimension of (batch_size, num_nodes, node_feature_dim). adj: An incoming tensor contains adjacency matrices. Each adjacency matrix has added diagonal ones before entering the layer. It should have dimension of (batch_size, num_nodes, num_nodes). training: A boolean indicating if the model is in training mode or not. This affects the behavior of dropout layers. Returns: x_g: The graph level features tensor. It is aggregated from neighbours with attention and it has dimension of (batch_size, out_node_feature_dim).\n- def call(self, inputs, training=False): Forward pass computation of the model. Args: inputs: An inputs dictionary fed to the model. training: A boolean indicating if the model is in training mode or not. Returns: output: Logits tensor with dimension of (batch_size, classes) for classification.\n\n<|skeleton|>\nclass GATModel:\n \"\"\"A model that implements molecule classification via graph attention. #### References [1]: Petar Veličković, et. al. Graph Attention Networks. ICLR 2018. https://arxiv.org/abs/1710.10903\"\"\"\n\n def __init__(self, attention_heads, node_feature_dim, out_node_feature_dim, readout_layer_size, num_classes, constant_attention=False, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, dropout_rate=0.1):\n \"\"\"Construct a graph attention model. Args: attention_heads: number (integer) of attention heads. node_feature_dim: dimension (integer) of incoming node level features. out_node_feature_dim: dimension (integer) of node level features outcoming from the attention layer. readout_layer_size: dimension (integer) of graph level features after readout layer. num_classes: number (integer) of classes for classification. constant_attention: a boolean. If True, we directly use equal attention coefficients across neighbors without going through the network. Default is False. kernel_regularizer: Regularization function for Dense layer. dropout_rate: a float regulating percent of features to turn OFF.\"\"\"\n <|body_0|>\n\n def graph_representation(self, nodes, adj, training=False):\n \"\"\"Forward pass to compute molecular graph level representation. Args: nodes: An incoming tensor contains node level features and it should have dimension of (batch_size, num_nodes, node_feature_dim). adj: An incoming tensor contains adjacency matrices. Each adjacency matrix has added diagonal ones before entering the layer. It should have dimension of (batch_size, num_nodes, num_nodes). training: A boolean indicating if the model is in training mode or not. This affects the behavior of dropout layers. Returns: x_g: The graph level features tensor. It is aggregated from neighbours with attention and it has dimension of (batch_size, out_node_feature_dim).\"\"\"\n <|body_1|>\n\n def call(self, inputs, training=False):\n \"\"\"Forward pass computation of the model. Args: inputs: An inputs dictionary fed to the model. training: A boolean indicating if the model is in training mode or not. Returns: output: Logits tensor with dimension of (batch_size, classes) for classification.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.dropout = tf.keras.layers.Dropout(dropout_rate)\n self.attention_heads1 = [GraphAttentionLayer(node_feature_dim, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads2 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads3 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.i_layer = tf.keras.layers.Dense(readout_layer_size, activation='sigmoid', kernel_regularizer=kernel_regularizer)\n self.j_layer = tf.keras.layers.Dense(readout_layer_size, kernel_regularizer=kernel_regularizer)\n self.classifier = tf.keras.layers.Dense(num_classes, kernel_regularizer=kernel_regularizer)\n self.softmax = tf.keras.layers.Softmax()\n<|end_body_0|>\n\n<|body_start_1|>\n attention_layers = [self.attention_heads1, self.attention_heads2]\n nodes_under_iter = nodes\n for attention_heads in attention_layers:\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n nodes_under_iter = tf.concat([a_head(nodes_under_iter, adj) for a_head in attention_heads], axis=2)\n nodes_under_iter = tf.nn.elu(nodes_under_iter)\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n if len(self.attention_heads3) > 1:\n nodes_under_iter = tf.keras.layers.Average()([a_head(nodes_under_iter, adj) for a_head in self.attention_heads3])\n else:\n nodes_under_iter = self.attention_heads3[0](nodes_under_iter, adj)\n readout = tf.reduce_sum(tf.multiply(self.i_layer(tf.keras.layers.Concatenate()([nodes_under_iter, nodes])), self.j_layer(nodes_under_iter)), axis=1)\n return readout\n<|end_body_1|>\n\n<|body_start_2|>\n nodes, edges = (inputs['atoms'], inputs['pairs'])\n adjacency_matrix = tf.cast(get_adjacency_matrix(edges), tf.int32)\n readout = self.graph_representation(nodes, adjacency_matrix, training)\n logits = self.classifier(readout, training=training)\n return self.softmax(logits)\n<|end_body_2|>\n", "revision_id": "f5f6f50f82bd441339c9d9efbef3f09e72c5fef6", "skeleton": "<|skeleton|>\nclass GATModel:\n \"\"\"A model that implements molecule classification via graph attention. #### References [1]: Petar Veličković, et. al. Graph Attention Networks. ICLR 2018. https://arxiv.org/abs/1710.10903\"\"\"\n\n def __init__(self, attention_heads, node_feature_dim, out_node_feature_dim, readout_layer_size, num_classes, constant_attention=False, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, dropout_rate=0.1):\n \"\"\"Construct a graph attention model. Args: attention_heads: number (integer) of attention heads. node_feature_dim: dimension (integer) of incoming node level features. out_node_feature_dim: dimension (integer) of node level features outcoming from the attention layer. readout_layer_size: dimension (integer) of graph level features after readout layer. num_classes: number (integer) of classes for classification. constant_attention: a boolean. If True, we directly use equal attention coefficients across neighbors without going through the network. Default is False. kernel_regularizer: Regularization function for Dense layer. dropout_rate: a float regulating percent of features to turn OFF.\"\"\"\n <|body_0|>\n\n def graph_representation(self, nodes, adj, training=False):\n \"\"\"Forward pass to compute molecular graph level representation. Args: nodes: An incoming tensor contains node level features and it should have dimension of (batch_size, num_nodes, node_feature_dim). adj: An incoming tensor contains adjacency matrices. Each adjacency matrix has added diagonal ones before entering the layer. It should have dimension of (batch_size, num_nodes, num_nodes). training: A boolean indicating if the model is in training mode or not. This affects the behavior of dropout layers. Returns: x_g: The graph level features tensor. It is aggregated from neighbours with attention and it has dimension of (batch_size, out_node_feature_dim).\"\"\"\n <|body_1|>\n\n def call(self, inputs, training=False):\n \"\"\"Forward pass computation of the model. Args: inputs: An inputs dictionary fed to the model. training: A boolean indicating if the model is in training mode or not. Returns: output: Logits tensor with dimension of (batch_size, classes) for classification.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GATModel:\n \"\"\"A model that implements molecule classification via graph attention. #### References [1]: Petar Veličković, et. al. Graph Attention Networks. ICLR 2018. https://arxiv.org/abs/1710.10903\"\"\"\n\n def __init__(self, attention_heads, node_feature_dim, out_node_feature_dim, readout_layer_size, num_classes, constant_attention=False, kernel_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, dropout_rate=0.1):\n \"\"\"Construct a graph attention model. Args: attention_heads: number (integer) of attention heads. node_feature_dim: dimension (integer) of incoming node level features. out_node_feature_dim: dimension (integer) of node level features outcoming from the attention layer. readout_layer_size: dimension (integer) of graph level features after readout layer. num_classes: number (integer) of classes for classification. constant_attention: a boolean. If True, we directly use equal attention coefficients across neighbors without going through the network. Default is False. kernel_regularizer: Regularization function for Dense layer. dropout_rate: a float regulating percent of features to turn OFF.\"\"\"\n super().__init__()\n self.dropout = tf.keras.layers.Dropout(dropout_rate)\n self.attention_heads1 = [GraphAttentionLayer(node_feature_dim, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads2 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.attention_heads3 = [GraphAttentionLayer(out_node_feature_dim * attention_heads, out_node_feature_dim, constant_attention) for _ in range(attention_heads)]\n self.i_layer = tf.keras.layers.Dense(readout_layer_size, activation='sigmoid', kernel_regularizer=kernel_regularizer)\n self.j_layer = tf.keras.layers.Dense(readout_layer_size, kernel_regularizer=kernel_regularizer)\n self.classifier = tf.keras.layers.Dense(num_classes, kernel_regularizer=kernel_regularizer)\n self.softmax = tf.keras.layers.Softmax()\n\n def graph_representation(self, nodes, adj, training=False):\n \"\"\"Forward pass to compute molecular graph level representation. Args: nodes: An incoming tensor contains node level features and it should have dimension of (batch_size, num_nodes, node_feature_dim). adj: An incoming tensor contains adjacency matrices. Each adjacency matrix has added diagonal ones before entering the layer. It should have dimension of (batch_size, num_nodes, num_nodes). training: A boolean indicating if the model is in training mode or not. This affects the behavior of dropout layers. Returns: x_g: The graph level features tensor. It is aggregated from neighbours with attention and it has dimension of (batch_size, out_node_feature_dim).\"\"\"\n attention_layers = [self.attention_heads1, self.attention_heads2]\n nodes_under_iter = nodes\n for attention_heads in attention_layers:\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n nodes_under_iter = tf.concat([a_head(nodes_under_iter, adj) for a_head in attention_heads], axis=2)\n nodes_under_iter = tf.nn.elu(nodes_under_iter)\n nodes_under_iter = self.dropout(nodes_under_iter, training=training)\n if len(self.attention_heads3) > 1:\n nodes_under_iter = tf.keras.layers.Average()([a_head(nodes_under_iter, adj) for a_head in self.attention_heads3])\n else:\n nodes_under_iter = self.attention_heads3[0](nodes_under_iter, adj)\n readout = tf.reduce_sum(tf.multiply(self.i_layer(tf.keras.layers.Concatenate()([nodes_under_iter, nodes])), self.j_layer(nodes_under_iter)), axis=1)\n return readout\n\n def call(self, inputs, training=False):\n \"\"\"Forward pass computation of the model. Args: inputs: An inputs dictionary fed to the model. training: A boolean indicating if the model is in training mode or not. Returns: output: Logits tensor with dimension of (batch_size, classes) for classification.\"\"\"\n nodes, edges = (inputs['atoms'], inputs['pairs'])\n adjacency_matrix = tf.cast(get_adjacency_matrix(edges), tf.int32)\n readout = self.graph_representation(nodes, adjacency_matrix, training)\n logits = self.classifier(readout, training=training)\n return self.softmax(logits)\n", "source": "the_stack_v2_python_sparse", "source_path": "uncertainty_baselines/models/gat.py", "source_repo": "google/uncertainty-baselines", "split": "test", "star_events_count": 1235} {"blob_id": "4813cdbfac261330e4bff25a658124f1be870ff0", "bodies": ["if sns_topic_arn is None:\n sns_topic_arn = config.etl.get('SNS_TOPIC_ARN_WARNING', None)\nif script_arguments is None:\n script_arguments = list()\nscript_arguments.append('--test_name=%s' % (pipeline_name + '.' + id))\nif sns_topic_arn:\n script_arguments.append('--sns_topic_arn=%s' % sns_topic_arn)\nsuper(QATransformStep, self).__init__(id=id, script_arguments=script_arguments, no_output=True, **kwargs)", "input_args = cls.pop_inputs(input_args)\nstep_args = cls.base_arguments_processor(etl, input_args)\nstep_args['pipeline_name'] = etl.name\nreturn step_args"], "bodies_text": "<|body_start_0|>\n if sns_topic_arn is None:\n sns_topic_arn = config.etl.get('SNS_TOPIC_ARN_WARNING', None)\n if script_arguments is None:\n script_arguments = list()\n script_arguments.append('--test_name=%s' % (pipeline_name + '.' + id))\n if sns_topic_arn:\n script_arguments.append('--sns_topic_arn=%s' % sns_topic_arn)\n super(QATransformStep, self).__init__(id=id, script_arguments=script_arguments, no_output=True, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n input_args = cls.pop_inputs(input_args)\n step_args = cls.base_arguments_processor(etl, input_args)\n step_args['pipeline_name'] = etl.name\n return step_args\n<|end_body_1|>\n", "class_docstring": "QATransform Step class that helps run scripts on resouces for QA checks", "class_name": "QATransformStep", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QATransformStep:\n \"\"\"QATransform Step class that helps run scripts on resouces for QA checks\"\"\"\n\n def __init__(self, id, pipeline_name, script_arguments=None, sns_topic_arn=None, **kwargs):\n \"\"\"Constructor for the QATransformStep class Args: sns_arn(str): sns topic arn for QA steps script_arguments(list of str): list of arguments to the script **kwargs(optional): Keyword arguments directly passed to base class\"\"\"\n <|body_0|>\n\n def arguments_processor(cls, etl, input_args):\n \"\"\"Parse the step arguments according to the ETL pipeline Args: etl(ETLPipeline): Pipeline object containing resources and steps step_args(dict): Dictionary of the step arguments for the class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if sns_topic_arn is None:\n sns_topic_arn = config.etl.get('SNS_TOPIC_ARN_WARNING', None)\n if script_arguments is None:\n script_arguments = list()\n script_arguments.append('--test_name=%s' % (pipeline_name + '.' + id))\n if sns_topic_arn:\n script_arguments.append('--sns_topic_arn=%s' % sns_topic_arn)\n super(QATransformStep, self).__init__(id=id, script_arguments=script_arguments, no_output=True, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n input_args = cls.pop_inputs(input_args)\n step_args = cls.base_arguments_processor(etl, input_args)\n step_args['pipeline_name'] = etl.name\n return step_args\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000114", "length_bytes": 1780, "license_type": "permissive", "methods": [{"docstring": "Constructor for the QATransformStep class Args: sns_arn(str): sns topic arn for QA steps script_arguments(list of str): list of arguments to the script **kwargs(optional): Keyword arguments directly passed to base class", "name": "__init__", "signature": "def __init__(self, id, pipeline_name, script_arguments=None, sns_topic_arn=None, **kwargs)"}, {"docstring": "Parse the step arguments according to the ETL pipeline Args: etl(ETLPipeline): Pipeline object containing resources and steps step_args(dict): Dictionary of the step arguments for the class", "name": "arguments_processor", "signature": "def arguments_processor(cls, etl, input_args)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_014168", "prompt": "Implement the Python class `QATransformStep` described below.\n\nClass description:\nQATransform Step class that helps run scripts on resouces for QA checks\n\nMethod signatures and docstrings:\n- def __init__(self, id, pipeline_name, script_arguments=None, sns_topic_arn=None, **kwargs): Constructor for the QATransformStep class Args: sns_arn(str): sns topic arn for QA steps script_arguments(list of str): list of arguments to the script **kwargs(optional): Keyword arguments directly passed to base class\n- def arguments_processor(cls, etl, input_args): Parse the step arguments according to the ETL pipeline Args: etl(ETLPipeline): Pipeline object containing resources and steps step_args(dict): Dictionary of the step arguments for the class", "prompted_full_text": "Implement the Python class `QATransformStep` described below.\n\nClass description:\nQATransform Step class that helps run scripts on resouces for QA checks\n\nMethod signatures and docstrings:\n- def __init__(self, id, pipeline_name, script_arguments=None, sns_topic_arn=None, **kwargs): Constructor for the QATransformStep class Args: sns_arn(str): sns topic arn for QA steps script_arguments(list of str): list of arguments to the script **kwargs(optional): Keyword arguments directly passed to base class\n- def arguments_processor(cls, etl, input_args): Parse the step arguments according to the ETL pipeline Args: etl(ETLPipeline): Pipeline object containing resources and steps step_args(dict): Dictionary of the step arguments for the class\n\n<|skeleton|>\nclass QATransformStep:\n \"\"\"QATransform Step class that helps run scripts on resouces for QA checks\"\"\"\n\n def __init__(self, id, pipeline_name, script_arguments=None, sns_topic_arn=None, **kwargs):\n \"\"\"Constructor for the QATransformStep class Args: sns_arn(str): sns topic arn for QA steps script_arguments(list of str): list of arguments to the script **kwargs(optional): Keyword arguments directly passed to base class\"\"\"\n <|body_0|>\n\n def arguments_processor(cls, etl, input_args):\n \"\"\"Parse the step arguments according to the ETL pipeline Args: etl(ETLPipeline): Pipeline object containing resources and steps step_args(dict): Dictionary of the step arguments for the class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if sns_topic_arn is None:\n sns_topic_arn = config.etl.get('SNS_TOPIC_ARN_WARNING', None)\n if script_arguments is None:\n script_arguments = list()\n script_arguments.append('--test_name=%s' % (pipeline_name + '.' + id))\n if sns_topic_arn:\n script_arguments.append('--sns_topic_arn=%s' % sns_topic_arn)\n super(QATransformStep, self).__init__(id=id, script_arguments=script_arguments, no_output=True, **kwargs)\n<|end_body_0|>\n\n<|body_start_1|>\n input_args = cls.pop_inputs(input_args)\n step_args = cls.base_arguments_processor(etl, input_args)\n step_args['pipeline_name'] = etl.name\n return step_args\n<|end_body_1|>\n", "revision_id": "797cb719e6c2abeda0751ada3339c72bfb19c8f2", "skeleton": "<|skeleton|>\nclass QATransformStep:\n \"\"\"QATransform Step class that helps run scripts on resouces for QA checks\"\"\"\n\n def __init__(self, id, pipeline_name, script_arguments=None, sns_topic_arn=None, **kwargs):\n \"\"\"Constructor for the QATransformStep class Args: sns_arn(str): sns topic arn for QA steps script_arguments(list of str): list of arguments to the script **kwargs(optional): Keyword arguments directly passed to base class\"\"\"\n <|body_0|>\n\n def arguments_processor(cls, etl, input_args):\n \"\"\"Parse the step arguments according to the ETL pipeline Args: etl(ETLPipeline): Pipeline object containing resources and steps step_args(dict): Dictionary of the step arguments for the class\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QATransformStep:\n \"\"\"QATransform Step class that helps run scripts on resouces for QA checks\"\"\"\n\n def __init__(self, id, pipeline_name, script_arguments=None, sns_topic_arn=None, **kwargs):\n \"\"\"Constructor for the QATransformStep class Args: sns_arn(str): sns topic arn for QA steps script_arguments(list of str): list of arguments to the script **kwargs(optional): Keyword arguments directly passed to base class\"\"\"\n if sns_topic_arn is None:\n sns_topic_arn = config.etl.get('SNS_TOPIC_ARN_WARNING', None)\n if script_arguments is None:\n script_arguments = list()\n script_arguments.append('--test_name=%s' % (pipeline_name + '.' + id))\n if sns_topic_arn:\n script_arguments.append('--sns_topic_arn=%s' % sns_topic_arn)\n super(QATransformStep, self).__init__(id=id, script_arguments=script_arguments, no_output=True, **kwargs)\n\n def arguments_processor(cls, etl, input_args):\n \"\"\"Parse the step arguments according to the ETL pipeline Args: etl(ETLPipeline): Pipeline object containing resources and steps step_args(dict): Dictionary of the step arguments for the class\"\"\"\n input_args = cls.pop_inputs(input_args)\n step_args = cls.base_arguments_processor(etl, input_args)\n step_args['pipeline_name'] = etl.name\n return step_args\n", "source": "the_stack_v2_python_sparse", "source_path": "dataduct/steps/qa_transform.py", "source_repo": "EverFi/dataduct", "split": "test", "star_events_count": 3} {"blob_id": "72a240814eec6cb600d0741e13804b33bd0e985e", "bodies": ["self._height = 0\nself._head = SkipList._Node(None)\nself._len = 0\nfor elem in iterable:\n self.add(elem)", "width = 5\nreps = []\ncurs = []\ncur = self._head\nwhile cur is not None:\n curs.append(cur)\n reps.append('')\n cur = cur.below\nlowest = curs[-1]\nwhile lowest is not None:\n for i in range(len(curs)):\n if curs[i] is not None and curs[i].value == lowest.value:\n reps[i] += str(curs[i]).center(width)\n curs[i] = curs[i].next\n else:\n reps[i] += ''.center(width)\n lowest = lowest.next\nreturn '\\n'.join(reps) + '\\n------------------'", "if value in self:\n return\ninsertion_height = _generate_level()\nwhile self._height < insertion_height + 1:\n new_head = SkipList._Node(None)\n new_head.below = self._head\n new_head.next = None\n self._head = new_head\n self._height += 1\nlevel = self._height - 1\ncurrent = self._head\nrecently_added = None\nwhile current:\n while current.next and value > current.next.value:\n current = current.next\n if level < insertion_height:\n new_node = SkipList._Node(value)\n new_node.next = current.next\n current.next = new_node\n if recently_added:\n recently_added.below = new_node\n recently_added = new_node\n current = current.below\n level -= 1", "if value not in self:\n raise KeyError('No such value %s' % value)\ncurrent = self._head\nwhile current:\n while current.next and value > current.next.value:\n current = current.next\n if current.next and current.next.value == value:\n current.next = current.next.next\n current = current.below", "current = self._head\nwhile current:\n while current.next and value >= current.next.value:\n current = current.next\n if current.value == value:\n return True\n current = current.below\nreturn False", "current = self._head\nwhile current.below:\n current = current.below\ncurrent = current.next\nwhile current:\n yield current.value\n current = current.next"], "bodies_text": "<|body_start_0|>\n self._height = 0\n self._head = SkipList._Node(None)\n self._len = 0\n for elem in iterable:\n self.add(elem)\n<|end_body_0|>\n\n<|body_start_1|>\n width = 5\n reps = []\n curs = []\n cur = self._head\n while cur is not None:\n curs.append(cur)\n reps.append('')\n cur = cur.below\n lowest = curs[-1]\n while lowest is not None:\n for i in range(len(curs)):\n if curs[i] is not None and curs[i].value == lowest.value:\n reps[i] += str(curs[i]).center(width)\n curs[i] = curs[i].next\n else:\n reps[i] += ''.center(width)\n lowest = lowest.next\n return '\\n'.join(reps) + '\\n------------------'\n<|end_body_1|>\n\n<|body_start_2|>\n if value in self:\n return\n insertion_height = _generate_level()\n while self._height < insertion_height + 1:\n new_head = SkipList._Node(None)\n new_head.below = self._head\n new_head.next = None\n self._head = new_head\n self._height += 1\n level = self._height - 1\n current = self._head\n recently_added = None\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if level < insertion_height:\n new_node = SkipList._Node(value)\n new_node.next = current.next\n current.next = new_node\n if recently_added:\n recently_added.below = new_node\n recently_added = new_node\n current = current.below\n level -= 1\n<|end_body_2|>\n\n<|body_start_3|>\n if value not in self:\n raise KeyError('No such value %s' % value)\n current = self._head\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if current.next and current.next.value == value:\n current.next = current.next.next\n current = current.below\n<|end_body_3|>\n\n<|body_start_4|>\n current = self._head\n while current:\n while current.next and value >= current.next.value:\n current = current.next\n if current.value == value:\n return True\n current = current.below\n return False\n<|end_body_4|>\n\n<|body_start_5|>\n current = self._head\n while current.below:\n current = current.below\n current = current.next\n while current:\n yield current.value\n current = current.next\n<|end_body_5|>\n", "class_docstring": "SortedSet ADT implemented using a skip list. Maintains elements in standard sorted order. Single-level nodes with next and below pointers. Uses sentinel values at the beginning of the skip list. Uses \"coin tosses\" to determine insertion heights.", "class_name": "SkipList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SkipList:\n \"\"\"SortedSet ADT implemented using a skip list. Maintains elements in standard sorted order. Single-level nodes with next and below pointers. Uses sentinel values at the beginning of the skip list. Uses \"coin tosses\" to determine insertion heights.\"\"\"\n\n def __init__(self, iterable=[]):\n \"\"\"Create a new (empty) skip list\"\"\"\n <|body_0|>\n\n def viz(self):\n \"\"\"Returns a formatted textual representation of the list\"\"\"\n <|body_1|>\n\n def add(self, value):\n \"\"\"Insert a value into the skip list; doesn't allow duplicates\"\"\"\n <|body_2|>\n\n def remove(self, value):\n \"\"\"Find and remove a value from the skip list. Raises a KeyError if the value is not in the list.\"\"\"\n <|body_3|>\n\n def __contains__(self, value):\n \"\"\"Returns True if the given value is in the list; False otherwise\"\"\"\n <|body_4|>\n\n def __iter__(self):\n \"\"\"Returns an iterator for the list\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._height = 0\n self._head = SkipList._Node(None)\n self._len = 0\n for elem in iterable:\n self.add(elem)\n<|end_body_0|>\n\n<|body_start_1|>\n width = 5\n reps = []\n curs = []\n cur = self._head\n while cur is not None:\n curs.append(cur)\n reps.append('')\n cur = cur.below\n lowest = curs[-1]\n while lowest is not None:\n for i in range(len(curs)):\n if curs[i] is not None and curs[i].value == lowest.value:\n reps[i] += str(curs[i]).center(width)\n curs[i] = curs[i].next\n else:\n reps[i] += ''.center(width)\n lowest = lowest.next\n return '\\n'.join(reps) + '\\n------------------'\n<|end_body_1|>\n\n<|body_start_2|>\n if value in self:\n return\n insertion_height = _generate_level()\n while self._height < insertion_height + 1:\n new_head = SkipList._Node(None)\n new_head.below = self._head\n new_head.next = None\n self._head = new_head\n self._height += 1\n level = self._height - 1\n current = self._head\n recently_added = None\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if level < insertion_height:\n new_node = SkipList._Node(value)\n new_node.next = current.next\n current.next = new_node\n if recently_added:\n recently_added.below = new_node\n recently_added = new_node\n current = current.below\n level -= 1\n<|end_body_2|>\n\n<|body_start_3|>\n if value not in self:\n raise KeyError('No such value %s' % value)\n current = self._head\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if current.next and current.next.value == value:\n current.next = current.next.next\n current = current.below\n<|end_body_3|>\n\n<|body_start_4|>\n current = self._head\n while current:\n while current.next and value >= current.next.value:\n current = current.next\n if current.value == value:\n return True\n current = current.below\n return False\n<|end_body_4|>\n\n<|body_start_5|>\n current = self._head\n while current.below:\n current = current.below\n current = current.next\n while current:\n yield current.value\n current = current.next\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000115", "length_bytes": 4921, "license_type": "no_license", "methods": [{"docstring": "Create a new (empty) skip list", "name": "__init__", "signature": "def __init__(self, iterable=[])"}, {"docstring": "Returns a formatted textual representation of the list", "name": "viz", "signature": "def viz(self)"}, {"docstring": "Insert a value into the skip list; doesn't allow duplicates", "name": "add", "signature": "def add(self, value)"}, {"docstring": "Find and remove a value from the skip list. Raises a KeyError if the value is not in the list.", "name": "remove", "signature": "def remove(self, value)"}, {"docstring": "Returns True if the given value is in the list; False otherwise", "name": "__contains__", "signature": "def __contains__(self, value)"}, {"docstring": "Returns an iterator for the list", "name": "__iter__", "signature": "def __iter__(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_020866", "prompt": "Implement the Python class `SkipList` described below.\n\nClass description:\nSortedSet ADT implemented using a skip list. Maintains elements in standard sorted order. Single-level nodes with next and below pointers. Uses sentinel values at the beginning of the skip list. Uses \"coin tosses\" to determine insertion heights.\n\nMethod signatures and docstrings:\n- def __init__(self, iterable=[]): Create a new (empty) skip list\n- def viz(self): Returns a formatted textual representation of the list\n- def add(self, value): Insert a value into the skip list; doesn't allow duplicates\n- def remove(self, value): Find and remove a value from the skip list. Raises a KeyError if the value is not in the list.\n- def __contains__(self, value): Returns True if the given value is in the list; False otherwise\n- def __iter__(self): Returns an iterator for the list", "prompted_full_text": "Implement the Python class `SkipList` described below.\n\nClass description:\nSortedSet ADT implemented using a skip list. Maintains elements in standard sorted order. Single-level nodes with next and below pointers. Uses sentinel values at the beginning of the skip list. Uses \"coin tosses\" to determine insertion heights.\n\nMethod signatures and docstrings:\n- def __init__(self, iterable=[]): Create a new (empty) skip list\n- def viz(self): Returns a formatted textual representation of the list\n- def add(self, value): Insert a value into the skip list; doesn't allow duplicates\n- def remove(self, value): Find and remove a value from the skip list. Raises a KeyError if the value is not in the list.\n- def __contains__(self, value): Returns True if the given value is in the list; False otherwise\n- def __iter__(self): Returns an iterator for the list\n\n<|skeleton|>\nclass SkipList:\n \"\"\"SortedSet ADT implemented using a skip list. Maintains elements in standard sorted order. Single-level nodes with next and below pointers. Uses sentinel values at the beginning of the skip list. Uses \"coin tosses\" to determine insertion heights.\"\"\"\n\n def __init__(self, iterable=[]):\n \"\"\"Create a new (empty) skip list\"\"\"\n <|body_0|>\n\n def viz(self):\n \"\"\"Returns a formatted textual representation of the list\"\"\"\n <|body_1|>\n\n def add(self, value):\n \"\"\"Insert a value into the skip list; doesn't allow duplicates\"\"\"\n <|body_2|>\n\n def remove(self, value):\n \"\"\"Find and remove a value from the skip list. Raises a KeyError if the value is not in the list.\"\"\"\n <|body_3|>\n\n def __contains__(self, value):\n \"\"\"Returns True if the given value is in the list; False otherwise\"\"\"\n <|body_4|>\n\n def __iter__(self):\n \"\"\"Returns an iterator for the list\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._height = 0\n self._head = SkipList._Node(None)\n self._len = 0\n for elem in iterable:\n self.add(elem)\n<|end_body_0|>\n\n<|body_start_1|>\n width = 5\n reps = []\n curs = []\n cur = self._head\n while cur is not None:\n curs.append(cur)\n reps.append('')\n cur = cur.below\n lowest = curs[-1]\n while lowest is not None:\n for i in range(len(curs)):\n if curs[i] is not None and curs[i].value == lowest.value:\n reps[i] += str(curs[i]).center(width)\n curs[i] = curs[i].next\n else:\n reps[i] += ''.center(width)\n lowest = lowest.next\n return '\\n'.join(reps) + '\\n------------------'\n<|end_body_1|>\n\n<|body_start_2|>\n if value in self:\n return\n insertion_height = _generate_level()\n while self._height < insertion_height + 1:\n new_head = SkipList._Node(None)\n new_head.below = self._head\n new_head.next = None\n self._head = new_head\n self._height += 1\n level = self._height - 1\n current = self._head\n recently_added = None\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if level < insertion_height:\n new_node = SkipList._Node(value)\n new_node.next = current.next\n current.next = new_node\n if recently_added:\n recently_added.below = new_node\n recently_added = new_node\n current = current.below\n level -= 1\n<|end_body_2|>\n\n<|body_start_3|>\n if value not in self:\n raise KeyError('No such value %s' % value)\n current = self._head\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if current.next and current.next.value == value:\n current.next = current.next.next\n current = current.below\n<|end_body_3|>\n\n<|body_start_4|>\n current = self._head\n while current:\n while current.next and value >= current.next.value:\n current = current.next\n if current.value == value:\n return True\n current = current.below\n return False\n<|end_body_4|>\n\n<|body_start_5|>\n current = self._head\n while current.below:\n current = current.below\n current = current.next\n while current:\n yield current.value\n current = current.next\n<|end_body_5|>\n", "revision_id": "3cf95f6974e47f1e21bfa1ca2ad8c4d16093ab70", "skeleton": "<|skeleton|>\nclass SkipList:\n \"\"\"SortedSet ADT implemented using a skip list. Maintains elements in standard sorted order. Single-level nodes with next and below pointers. Uses sentinel values at the beginning of the skip list. Uses \"coin tosses\" to determine insertion heights.\"\"\"\n\n def __init__(self, iterable=[]):\n \"\"\"Create a new (empty) skip list\"\"\"\n <|body_0|>\n\n def viz(self):\n \"\"\"Returns a formatted textual representation of the list\"\"\"\n <|body_1|>\n\n def add(self, value):\n \"\"\"Insert a value into the skip list; doesn't allow duplicates\"\"\"\n <|body_2|>\n\n def remove(self, value):\n \"\"\"Find and remove a value from the skip list. Raises a KeyError if the value is not in the list.\"\"\"\n <|body_3|>\n\n def __contains__(self, value):\n \"\"\"Returns True if the given value is in the list; False otherwise\"\"\"\n <|body_4|>\n\n def __iter__(self):\n \"\"\"Returns an iterator for the list\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SkipList:\n \"\"\"SortedSet ADT implemented using a skip list. Maintains elements in standard sorted order. Single-level nodes with next and below pointers. Uses sentinel values at the beginning of the skip list. Uses \"coin tosses\" to determine insertion heights.\"\"\"\n\n def __init__(self, iterable=[]):\n \"\"\"Create a new (empty) skip list\"\"\"\n self._height = 0\n self._head = SkipList._Node(None)\n self._len = 0\n for elem in iterable:\n self.add(elem)\n\n def viz(self):\n \"\"\"Returns a formatted textual representation of the list\"\"\"\n width = 5\n reps = []\n curs = []\n cur = self._head\n while cur is not None:\n curs.append(cur)\n reps.append('')\n cur = cur.below\n lowest = curs[-1]\n while lowest is not None:\n for i in range(len(curs)):\n if curs[i] is not None and curs[i].value == lowest.value:\n reps[i] += str(curs[i]).center(width)\n curs[i] = curs[i].next\n else:\n reps[i] += ''.center(width)\n lowest = lowest.next\n return '\\n'.join(reps) + '\\n------------------'\n\n def add(self, value):\n \"\"\"Insert a value into the skip list; doesn't allow duplicates\"\"\"\n if value in self:\n return\n insertion_height = _generate_level()\n while self._height < insertion_height + 1:\n new_head = SkipList._Node(None)\n new_head.below = self._head\n new_head.next = None\n self._head = new_head\n self._height += 1\n level = self._height - 1\n current = self._head\n recently_added = None\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if level < insertion_height:\n new_node = SkipList._Node(value)\n new_node.next = current.next\n current.next = new_node\n if recently_added:\n recently_added.below = new_node\n recently_added = new_node\n current = current.below\n level -= 1\n\n def remove(self, value):\n \"\"\"Find and remove a value from the skip list. Raises a KeyError if the value is not in the list.\"\"\"\n if value not in self:\n raise KeyError('No such value %s' % value)\n current = self._head\n while current:\n while current.next and value > current.next.value:\n current = current.next\n if current.next and current.next.value == value:\n current.next = current.next.next\n current = current.below\n\n def __contains__(self, value):\n \"\"\"Returns True if the given value is in the list; False otherwise\"\"\"\n current = self._head\n while current:\n while current.next and value >= current.next.value:\n current = current.next\n if current.value == value:\n return True\n current = current.below\n return False\n\n def __iter__(self):\n \"\"\"Returns an iterator for the list\"\"\"\n current = self._head\n while current.below:\n current = current.below\n current = current.next\n while current:\n yield current.value\n current = current.next\n", "source": "the_stack_v2_python_sparse", "source_path": "data_structures/skiplist.py", "source_repo": "balta2ar/scratchpad", "split": "test", "star_events_count": 1} {"blob_id": "828ef41f2cf29492209020be1ebe003ee6e404d1", "bodies": ["self._connect = connect\nself.can_log = False\nself.connect_task = None\nself.gateway = gateway\nself.protocol = None\nself.reconnect_timeout = reconnect_timeout\nself.timeout = timeout", "if not self.protocol or not self.protocol.transport:\n self.protocol = None\n return\n_LOGGER.info('Disconnecting from gateway')\nself.protocol.transport.close()\nself.protocol = None", "if not message or not self.protocol or (not self.protocol.transport):\n return\nif not self.can_log:\n _LOGGER.debug('Sending %s', message.strip())\ntry:\n self.protocol.transport.write(message.encode())\nexcept OSError as exc:\n _LOGGER.error('Failed writing to transport %s: %s', self.protocol.transport, exc)\n self.protocol.transport.close()\n self.protocol.conn_lost_callback()"], "bodies_text": "<|body_start_0|>\n self._connect = connect\n self.can_log = False\n self.connect_task = None\n self.gateway = gateway\n self.protocol = None\n self.reconnect_timeout = reconnect_timeout\n self.timeout = timeout\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.protocol or not self.protocol.transport:\n self.protocol = None\n return\n _LOGGER.info('Disconnecting from gateway')\n self.protocol.transport.close()\n self.protocol = None\n<|end_body_1|>\n\n<|body_start_2|>\n if not message or not self.protocol or (not self.protocol.transport):\n return\n if not self.can_log:\n _LOGGER.debug('Sending %s', message.strip())\n try:\n self.protocol.transport.write(message.encode())\n except OSError as exc:\n _LOGGER.error('Failed writing to transport %s: %s', self.protocol.transport, exc)\n self.protocol.transport.close()\n self.protocol.conn_lost_callback()\n<|end_body_2|>\n", "class_docstring": "Handle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type.", "class_name": "Transport", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Transport:\n \"\"\"Handle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type.\"\"\"\n\n def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs):\n \"\"\"Set up transport.\"\"\"\n <|body_0|>\n\n def disconnect(self):\n \"\"\"Disconnect from the transport.\"\"\"\n <|body_1|>\n\n def send(self, message):\n \"\"\"Write a message to the gateway.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._connect = connect\n self.can_log = False\n self.connect_task = None\n self.gateway = gateway\n self.protocol = None\n self.reconnect_timeout = reconnect_timeout\n self.timeout = timeout\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.protocol or not self.protocol.transport:\n self.protocol = None\n return\n _LOGGER.info('Disconnecting from gateway')\n self.protocol.transport.close()\n self.protocol = None\n<|end_body_1|>\n\n<|body_start_2|>\n if not message or not self.protocol or (not self.protocol.transport):\n return\n if not self.can_log:\n _LOGGER.debug('Sending %s', message.strip())\n try:\n self.protocol.transport.write(message.encode())\n except OSError as exc:\n _LOGGER.error('Failed writing to transport %s: %s', self.protocol.transport, exc)\n self.protocol.transport.close()\n self.protocol.conn_lost_callback()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000116", "length_bytes": 4942, "license_type": "permissive", "methods": [{"docstring": "Set up transport.", "name": "__init__", "signature": "def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs)"}, {"docstring": "Disconnect from the transport.", "name": "disconnect", "signature": "def disconnect(self)"}, {"docstring": "Write a message to the gateway.", "name": "send", "signature": "def send(self, message)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_034970", "prompt": "Implement the Python class `Transport` described below.\n\nClass description:\nHandle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type.\n\nMethod signatures and docstrings:\n- def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs): Set up transport.\n- def disconnect(self): Disconnect from the transport.\n- def send(self, message): Write a message to the gateway.", "prompted_full_text": "Implement the Python class `Transport` described below.\n\nClass description:\nHandle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type.\n\nMethod signatures and docstrings:\n- def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs): Set up transport.\n- def disconnect(self): Disconnect from the transport.\n- def send(self, message): Write a message to the gateway.\n\n<|skeleton|>\nclass Transport:\n \"\"\"Handle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type.\"\"\"\n\n def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs):\n \"\"\"Set up transport.\"\"\"\n <|body_0|>\n\n def disconnect(self):\n \"\"\"Disconnect from the transport.\"\"\"\n <|body_1|>\n\n def send(self, message):\n \"\"\"Write a message to the gateway.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._connect = connect\n self.can_log = False\n self.connect_task = None\n self.gateway = gateway\n self.protocol = None\n self.reconnect_timeout = reconnect_timeout\n self.timeout = timeout\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.protocol or not self.protocol.transport:\n self.protocol = None\n return\n _LOGGER.info('Disconnecting from gateway')\n self.protocol.transport.close()\n self.protocol = None\n<|end_body_1|>\n\n<|body_start_2|>\n if not message or not self.protocol or (not self.protocol.transport):\n return\n if not self.can_log:\n _LOGGER.debug('Sending %s', message.strip())\n try:\n self.protocol.transport.write(message.encode())\n except OSError as exc:\n _LOGGER.error('Failed writing to transport %s: %s', self.protocol.transport, exc)\n self.protocol.transport.close()\n self.protocol.conn_lost_callback()\n<|end_body_2|>\n", "revision_id": "f7264321986a66193192a10f3261fe268eeb7601", "skeleton": "<|skeleton|>\nclass Transport:\n \"\"\"Handle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type.\"\"\"\n\n def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs):\n \"\"\"Set up transport.\"\"\"\n <|body_0|>\n\n def disconnect(self):\n \"\"\"Disconnect from the transport.\"\"\"\n <|body_1|>\n\n def send(self, message):\n \"\"\"Write a message to the gateway.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Transport:\n \"\"\"Handle gateway transport. I/O is allowed in this class. This class should host methods that are related to the gateway transport type.\"\"\"\n\n def __init__(self, gateway, connect, timeout=1.0, reconnect_timeout=10.0, **kwargs):\n \"\"\"Set up transport.\"\"\"\n self._connect = connect\n self.can_log = False\n self.connect_task = None\n self.gateway = gateway\n self.protocol = None\n self.reconnect_timeout = reconnect_timeout\n self.timeout = timeout\n\n def disconnect(self):\n \"\"\"Disconnect from the transport.\"\"\"\n if not self.protocol or not self.protocol.transport:\n self.protocol = None\n return\n _LOGGER.info('Disconnecting from gateway')\n self.protocol.transport.close()\n self.protocol = None\n\n def send(self, message):\n \"\"\"Write a message to the gateway.\"\"\"\n if not message or not self.protocol or (not self.protocol.transport):\n return\n if not self.can_log:\n _LOGGER.debug('Sending %s', message.strip())\n try:\n self.protocol.transport.write(message.encode())\n except OSError as exc:\n _LOGGER.error('Failed writing to transport %s: %s', self.protocol.transport, exc)\n self.protocol.transport.close()\n self.protocol.conn_lost_callback()\n", "source": "the_stack_v2_python_sparse", "source_path": "mysensors/transport.py", "source_repo": "theolind/pymysensors", "split": "test", "star_events_count": 68} {"blob_id": "4bc8f0bfb6cdb80d47c3f0545e559fbf01850196", "bodies": ["self.coef_ = None\nself.intercept_ = None\nself._theta = None", "assert X_train.shape[0] == y_train.shape[0], 'The size of X_train must be equal to the size of y_train'\nX_b = np.vstack([np.ones((len(X_train), 1)), X_train])\nself._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\nself.coef_ = self._theta[1:]\nself.intercept_ = self._theta[0]\nreturn self", "assert self.intercept_ is not None and self.coef_ is not None, 'must fit before predict!'\nassert X_predict.shape[1] == len(self.coef_), 'the feature number of X_predict must be equal to X_train'\nX_b = np.vstack(np.ones((len(X_predict), 1), 1), X_predict)\nreturn X_b.dot(self._theta)"], "bodies_text": "<|body_start_0|>\n self.coef_ = None\n self.intercept_ = None\n self._theta = None\n<|end_body_0|>\n\n<|body_start_1|>\n assert X_train.shape[0] == y_train.shape[0], 'The size of X_train must be equal to the size of y_train'\n X_b = np.vstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.coef_ = self._theta[1:]\n self.intercept_ = self._theta[0]\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n assert self.intercept_ is not None and self.coef_ is not None, 'must fit before predict!'\n assert X_predict.shape[1] == len(self.coef_), 'the feature number of X_predict must be equal to X_train'\n X_b = np.vstack(np.ones((len(X_predict), 1), 1), X_predict)\n return X_b.dot(self._theta)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "LinearRegression", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LinearRegression:\n\n def __int__(self):\n \"\"\"构造方法\"\"\"\n <|body_0|>\n\n def fit_nomal(self, X_train, y_train):\n \"\"\"根据训练数据集X_train, y_train训练Linear Regression模型\"\"\"\n <|body_1|>\n\n def predict(self, X_predict):\n \"\"\"给定待预测数据集X_predict,返回表示X_predict的结果向量\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.coef_ = None\n self.intercept_ = None\n self._theta = None\n<|end_body_0|>\n\n<|body_start_1|>\n assert X_train.shape[0] == y_train.shape[0], 'The size of X_train must be equal to the size of y_train'\n X_b = np.vstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.coef_ = self._theta[1:]\n self.intercept_ = self._theta[0]\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n assert self.intercept_ is not None and self.coef_ is not None, 'must fit before predict!'\n assert X_predict.shape[1] == len(self.coef_), 'the feature number of X_predict must be equal to X_train'\n X_b = np.vstack(np.ones((len(X_predict), 1), 1), X_predict)\n return X_b.dot(self._theta)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000117", "length_bytes": 1092, "license_type": "no_license", "methods": [{"docstring": "构造方法", "name": "__int__", "signature": "def __int__(self)"}, {"docstring": "根据训练数据集X_train, y_train训练Linear Regression模型", "name": "fit_nomal", "signature": "def fit_nomal(self, X_train, y_train)"}, {"docstring": "给定待预测数据集X_predict,返回表示X_predict的结果向量", "name": "predict", "signature": "def predict(self, X_predict)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_032527", "prompt": "Implement the Python class `LinearRegression` described below.\n\nClass description:\nImplement the LinearRegression class.\n\nMethod signatures and docstrings:\n- def __int__(self): 构造方法\n- def fit_nomal(self, X_train, y_train): 根据训练数据集X_train, y_train训练Linear Regression模型\n- def predict(self, X_predict): 给定待预测数据集X_predict,返回表示X_predict的结果向量", "prompted_full_text": "Implement the Python class `LinearRegression` described below.\n\nClass description:\nImplement the LinearRegression class.\n\nMethod signatures and docstrings:\n- def __int__(self): 构造方法\n- def fit_nomal(self, X_train, y_train): 根据训练数据集X_train, y_train训练Linear Regression模型\n- def predict(self, X_predict): 给定待预测数据集X_predict,返回表示X_predict的结果向量\n\n<|skeleton|>\nclass LinearRegression:\n\n def __int__(self):\n \"\"\"构造方法\"\"\"\n <|body_0|>\n\n def fit_nomal(self, X_train, y_train):\n \"\"\"根据训练数据集X_train, y_train训练Linear Regression模型\"\"\"\n <|body_1|>\n\n def predict(self, X_predict):\n \"\"\"给定待预测数据集X_predict,返回表示X_predict的结果向量\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.coef_ = None\n self.intercept_ = None\n self._theta = None\n<|end_body_0|>\n\n<|body_start_1|>\n assert X_train.shape[0] == y_train.shape[0], 'The size of X_train must be equal to the size of y_train'\n X_b = np.vstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.coef_ = self._theta[1:]\n self.intercept_ = self._theta[0]\n return self\n<|end_body_1|>\n\n<|body_start_2|>\n assert self.intercept_ is not None and self.coef_ is not None, 'must fit before predict!'\n assert X_predict.shape[1] == len(self.coef_), 'the feature number of X_predict must be equal to X_train'\n X_b = np.vstack(np.ones((len(X_predict), 1), 1), X_predict)\n return X_b.dot(self._theta)\n<|end_body_2|>\n", "revision_id": "517ac7b7992a686fa5370b6fda8b62663735853c", "skeleton": "<|skeleton|>\nclass LinearRegression:\n\n def __int__(self):\n \"\"\"构造方法\"\"\"\n <|body_0|>\n\n def fit_nomal(self, X_train, y_train):\n \"\"\"根据训练数据集X_train, y_train训练Linear Regression模型\"\"\"\n <|body_1|>\n\n def predict(self, X_predict):\n \"\"\"给定待预测数据集X_predict,返回表示X_predict的结果向量\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LinearRegression:\n def __int__(self):\n \"\"\"构造方法\"\"\"\n self.coef_ = None\n self.intercept_ = None\n self._theta = None\n\n def fit_nomal(self, X_train, y_train):\n \"\"\"根据训练数据集X_train, y_train训练Linear Regression模型\"\"\"\n assert X_train.shape[0] == y_train.shape[0], 'The size of X_train must be equal to the size of y_train'\n X_b = np.vstack([np.ones((len(X_train), 1)), X_train])\n self._theta = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y_train)\n self.coef_ = self._theta[1:]\n self.intercept_ = self._theta[0]\n return self\n\n def predict(self, X_predict):\n \"\"\"给定待预测数据集X_predict,返回表示X_predict的结果向量\"\"\"\n assert self.intercept_ is not None and self.coef_ is not None, 'must fit before predict!'\n assert X_predict.shape[1] == len(self.coef_), 'the feature number of X_predict must be equal to X_train'\n X_b = np.vstack(np.ones((len(X_predict), 1), 1), X_predict)\n return X_b.dot(self._theta)\n", "source": "the_stack_v2_python_sparse", "source_path": "MachineLearning/PlayML/LinearRegression.py", "source_repo": "CharlesBird/Resources", "split": "test", "star_events_count": 1} {"blob_id": "1c4e2fd34033973c51d13e82d5ea3f5609ce3716", "bodies": ["try:\n return EnvironmentInstance.objects.get(pk=pk)\nexcept EnvironmentInstance.DoesNotExist:\n raise Http404", "env_instance = self.get_object(pk)\nserializer = EnvironmentInstanceSerializer(env_instance)\nreturn Response(serializer.data)", "env_instance = self.get_object(pk)\nserializer = EnvironmentInstanceSerializer(env_instance, data=request.data)\nif serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\nreturn Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "env_instance = self.get_object(pk)\nenv_instance.delete()\nreturn Response(status=status.HTTP_204_NO_CONTENT)"], "bodies_text": "<|body_start_0|>\n try:\n return EnvironmentInstance.objects.get(pk=pk)\n except EnvironmentInstance.DoesNotExist:\n raise Http404\n<|end_body_0|>\n\n<|body_start_1|>\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance)\n return Response(serializer.data)\n<|end_body_1|>\n\n<|body_start_2|>\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n\n<|body_start_3|>\n env_instance = self.get_object(pk)\n env_instance.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_3|>\n", "class_docstring": "Retrieve, update or delete a EnvironmentInstance instance.", "class_name": "EnvironmentInstanceDetails", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EnvironmentInstanceDetails:\n \"\"\"Retrieve, update or delete a EnvironmentInstance instance.\"\"\"\n\n def get_object(self, pk):\n \"\"\"Get the particular row from the table.\"\"\"\n <|body_0|>\n\n def get(self, request, pk, format=None):\n \"\"\"We are going to add the contact info content along with this pull request\"\"\"\n <|body_1|>\n\n def put(self, request, pk, format=None):\n \"\"\"When requested update the corresponding entry of the table\"\"\"\n <|body_2|>\n\n def delete(self, request, pk, format=None):\n \"\"\"When requested delete the corresponding entry of the table\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return EnvironmentInstance.objects.get(pk=pk)\n except EnvironmentInstance.DoesNotExist:\n raise Http404\n<|end_body_0|>\n\n<|body_start_1|>\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance)\n return Response(serializer.data)\n<|end_body_1|>\n\n<|body_start_2|>\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n\n<|body_start_3|>\n env_instance = self.get_object(pk)\n env_instance.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000118", "length_bytes": 15222, "license_type": "permissive", "methods": [{"docstring": "Get the particular row from the table.", "name": "get_object", "signature": "def get_object(self, pk)"}, {"docstring": "We are going to add the contact info content along with this pull request", "name": "get", "signature": "def get(self, request, pk, format=None)"}, {"docstring": "When requested update the corresponding entry of the table", "name": "put", "signature": "def put(self, request, pk, format=None)"}, {"docstring": "When requested delete the corresponding entry of the table", "name": "delete", "signature": "def delete(self, request, pk, format=None)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_053964", "prompt": "Implement the Python class `EnvironmentInstanceDetails` described below.\n\nClass description:\nRetrieve, update or delete a EnvironmentInstance instance.\n\nMethod signatures and docstrings:\n- def get_object(self, pk): Get the particular row from the table.\n- def get(self, request, pk, format=None): We are going to add the contact info content along with this pull request\n- def put(self, request, pk, format=None): When requested update the corresponding entry of the table\n- def delete(self, request, pk, format=None): When requested delete the corresponding entry of the table", "prompted_full_text": "Implement the Python class `EnvironmentInstanceDetails` described below.\n\nClass description:\nRetrieve, update or delete a EnvironmentInstance instance.\n\nMethod signatures and docstrings:\n- def get_object(self, pk): Get the particular row from the table.\n- def get(self, request, pk, format=None): We are going to add the contact info content along with this pull request\n- def put(self, request, pk, format=None): When requested update the corresponding entry of the table\n- def delete(self, request, pk, format=None): When requested delete the corresponding entry of the table\n\n<|skeleton|>\nclass EnvironmentInstanceDetails:\n \"\"\"Retrieve, update or delete a EnvironmentInstance instance.\"\"\"\n\n def get_object(self, pk):\n \"\"\"Get the particular row from the table.\"\"\"\n <|body_0|>\n\n def get(self, request, pk, format=None):\n \"\"\"We are going to add the contact info content along with this pull request\"\"\"\n <|body_1|>\n\n def put(self, request, pk, format=None):\n \"\"\"When requested update the corresponding entry of the table\"\"\"\n <|body_2|>\n\n def delete(self, request, pk, format=None):\n \"\"\"When requested delete the corresponding entry of the table\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return EnvironmentInstance.objects.get(pk=pk)\n except EnvironmentInstance.DoesNotExist:\n raise Http404\n<|end_body_0|>\n\n<|body_start_1|>\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance)\n return Response(serializer.data)\n<|end_body_1|>\n\n<|body_start_2|>\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n<|end_body_2|>\n\n<|body_start_3|>\n env_instance = self.get_object(pk)\n env_instance.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_3|>\n", "revision_id": "b0635e72338e14dad24f1ee0329212cd60a3e83a", "skeleton": "<|skeleton|>\nclass EnvironmentInstanceDetails:\n \"\"\"Retrieve, update or delete a EnvironmentInstance instance.\"\"\"\n\n def get_object(self, pk):\n \"\"\"Get the particular row from the table.\"\"\"\n <|body_0|>\n\n def get(self, request, pk, format=None):\n \"\"\"We are going to add the contact info content along with this pull request\"\"\"\n <|body_1|>\n\n def put(self, request, pk, format=None):\n \"\"\"When requested update the corresponding entry of the table\"\"\"\n <|body_2|>\n\n def delete(self, request, pk, format=None):\n \"\"\"When requested delete the corresponding entry of the table\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EnvironmentInstanceDetails:\n \"\"\"Retrieve, update or delete a EnvironmentInstance instance.\"\"\"\n\n def get_object(self, pk):\n \"\"\"Get the particular row from the table.\"\"\"\n try:\n return EnvironmentInstance.objects.get(pk=pk)\n except EnvironmentInstance.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n \"\"\"We are going to add the contact info content along with this pull request\"\"\"\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance)\n return Response(serializer.data)\n\n def put(self, request, pk, format=None):\n \"\"\"When requested update the corresponding entry of the table\"\"\"\n env_instance = self.get_object(pk)\n serializer = EnvironmentInstanceSerializer(env_instance, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, pk, format=None):\n \"\"\"When requested delete the corresponding entry of the table\"\"\"\n env_instance = self.get_object(pk)\n env_instance.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "source": "the_stack_v2_python_sparse", "source_path": "environment/views.py", "source_repo": "faisaltheparttimecoder/carelogBackend", "split": "test", "star_events_count": 1} {"blob_id": "c76a056c9bbde16bc6d50fadcb44081f3c54c2fd", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "class_docstring": "Proto file describing the Shared Set service. Service to manage shared sets.", "class_name": "SharedSetServiceServicer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SharedSetServiceServicer:\n \"\"\"Proto file describing the Shared Set service. Service to manage shared sets.\"\"\"\n\n def GetSharedSet(self, request, context):\n \"\"\"Returns the requested shared set in full detail.\"\"\"\n <|body_0|>\n\n def MutateSharedSets(self, request, context):\n \"\"\"Creates, updates, or removes shared sets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000119", "length_bytes": 5356, "license_type": "permissive", "methods": [{"docstring": "Returns the requested shared set in full detail.", "name": "GetSharedSet", "signature": "def GetSharedSet(self, request, context)"}, {"docstring": "Creates, updates, or removes shared sets. Operation statuses are returned.", "name": "MutateSharedSets", "signature": "def MutateSharedSets(self, request, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043540", "prompt": "Implement the Python class `SharedSetServiceServicer` described below.\n\nClass description:\nProto file describing the Shared Set service. Service to manage shared sets.\n\nMethod signatures and docstrings:\n- def GetSharedSet(self, request, context): Returns the requested shared set in full detail.\n- def MutateSharedSets(self, request, context): Creates, updates, or removes shared sets. Operation statuses are returned.", "prompted_full_text": "Implement the Python class `SharedSetServiceServicer` described below.\n\nClass description:\nProto file describing the Shared Set service. Service to manage shared sets.\n\nMethod signatures and docstrings:\n- def GetSharedSet(self, request, context): Returns the requested shared set in full detail.\n- def MutateSharedSets(self, request, context): Creates, updates, or removes shared sets. Operation statuses are returned.\n\n<|skeleton|>\nclass SharedSetServiceServicer:\n \"\"\"Proto file describing the Shared Set service. Service to manage shared sets.\"\"\"\n\n def GetSharedSet(self, request, context):\n \"\"\"Returns the requested shared set in full detail.\"\"\"\n <|body_0|>\n\n def MutateSharedSets(self, request, context):\n \"\"\"Creates, updates, or removes shared sets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "revision_id": "969eff5b6c3cec59d21191fa178cffb6270074c3", "skeleton": "<|skeleton|>\nclass SharedSetServiceServicer:\n \"\"\"Proto file describing the Shared Set service. Service to manage shared sets.\"\"\"\n\n def GetSharedSet(self, request, context):\n \"\"\"Returns the requested shared set in full detail.\"\"\"\n <|body_0|>\n\n def MutateSharedSets(self, request, context):\n \"\"\"Creates, updates, or removes shared sets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SharedSetServiceServicer:\n \"\"\"Proto file describing the Shared Set service. Service to manage shared sets.\"\"\"\n\n def GetSharedSet(self, request, context):\n \"\"\"Returns the requested shared set in full detail.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def MutateSharedSets(self, request, context):\n \"\"\"Creates, updates, or removes shared sets. Operation statuses are returned.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "google/ads/google_ads/v6/proto/services/shared_set_service_pb2_grpc.py", "source_repo": "VincentFritzsche/google-ads-python", "split": "test", "star_events_count": 0} {"blob_id": "6eb1789bf74663c0e332d464aa84c6d3be9bd06b", "bodies": ["if not root:\n return []\nleft = self.inorderTraversal(root.left)\nright = self.inorderTraversal(root.right)\nreturn left + [root.val] + right", "stack, path = ([], [])\nwhile root or stack:\n if root:\n stack.append(root)\n root = root.left\n else:\n node = stack.pop()\n path.append(node.val)\n root = node.right\nreturn path"], "bodies_text": "<|body_start_0|>\n if not root:\n return []\n left = self.inorderTraversal(root.left)\n right = self.inorderTraversal(root.right)\n return left + [root.val] + right\n<|end_body_0|>\n\n<|body_start_1|>\n stack, path = ([], [])\n while root or stack:\n if root:\n stack.append(root)\n root = root.left\n else:\n node = stack.pop()\n path.append(node.val)\n root = node.right\n return path\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def inorderTraversal1(self, root: TreeNode) -> List[int]:\n \"\"\"思路: 递归 1。 终止条件: root 为空 2。 返回值 [root.val] 3. level task: 左 + 根 + 右\"\"\"\n <|body_0|>\n\n def inorderTraversal2(self, root: TreeNode) -> List[int]:\n \"\"\"思路:非递归, 使用辅助stack,push root.left 直到找到根节点 node,此时放入结果, 使用 临时变量 为 tmp= node.right 注意: 中序遍历的非递归必须熟练掌握,应为 bst 中序遍历的结果为递增序列, 很多变形可由此展开\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return []\n left = self.inorderTraversal(root.left)\n right = self.inorderTraversal(root.right)\n return left + [root.val] + right\n<|end_body_0|>\n\n<|body_start_1|>\n stack, path = ([], [])\n while root or stack:\n if root:\n stack.append(root)\n root = root.left\n else:\n node = stack.pop()\n path.append(node.val)\n root = node.right\n return path\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000120", "length_bytes": 1558, "license_type": "no_license", "methods": [{"docstring": "思路: 递归 1。 终止条件: root 为空 2。 返回值 [root.val] 3. level task: 左 + 根 + 右", "name": "inorderTraversal1", "signature": "def inorderTraversal1(self, root: TreeNode) -> List[int]"}, {"docstring": "思路:非递归, 使用辅助stack,push root.left 直到找到根节点 node,此时放入结果, 使用 临时变量 为 tmp= node.right 注意: 中序遍历的非递归必须熟练掌握,应为 bst 中序遍历的结果为递增序列, 很多变形可由此展开", "name": "inorderTraversal2", "signature": "def inorderTraversal2(self, root: TreeNode) -> List[int]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_023509", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def inorderTraversal1(self, root: TreeNode) -> List[int]: 思路: 递归 1。 终止条件: root 为空 2。 返回值 [root.val] 3. level task: 左 + 根 + 右\n- def inorderTraversal2(self, root: TreeNode) -> List[int]: 思路:非递归, 使用辅助stack,push root.left 直到找到根节点 node,此时放入结果, 使用 临时变量 为 tmp= node.right 注意: 中序遍历的非递归必须熟练掌握,应为 bst 中序遍历的结果为递增序列, 很多变形可由此展开", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def inorderTraversal1(self, root: TreeNode) -> List[int]: 思路: 递归 1。 终止条件: root 为空 2。 返回值 [root.val] 3. level task: 左 + 根 + 右\n- def inorderTraversal2(self, root: TreeNode) -> List[int]: 思路:非递归, 使用辅助stack,push root.left 直到找到根节点 node,此时放入结果, 使用 临时变量 为 tmp= node.right 注意: 中序遍历的非递归必须熟练掌握,应为 bst 中序遍历的结果为递增序列, 很多变形可由此展开\n\n<|skeleton|>\nclass Solution:\n\n def inorderTraversal1(self, root: TreeNode) -> List[int]:\n \"\"\"思路: 递归 1。 终止条件: root 为空 2。 返回值 [root.val] 3. level task: 左 + 根 + 右\"\"\"\n <|body_0|>\n\n def inorderTraversal2(self, root: TreeNode) -> List[int]:\n \"\"\"思路:非递归, 使用辅助stack,push root.left 直到找到根节点 node,此时放入结果, 使用 临时变量 为 tmp= node.right 注意: 中序遍历的非递归必须熟练掌握,应为 bst 中序遍历的结果为递增序列, 很多变形可由此展开\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return []\n left = self.inorderTraversal(root.left)\n right = self.inorderTraversal(root.right)\n return left + [root.val] + right\n<|end_body_0|>\n\n<|body_start_1|>\n stack, path = ([], [])\n while root or stack:\n if root:\n stack.append(root)\n root = root.left\n else:\n node = stack.pop()\n path.append(node.val)\n root = node.right\n return path\n<|end_body_1|>\n", "revision_id": "4994b8b19abcdbcc0bda2944350e325242fadfd1", "skeleton": "<|skeleton|>\nclass Solution:\n\n def inorderTraversal1(self, root: TreeNode) -> List[int]:\n \"\"\"思路: 递归 1。 终止条件: root 为空 2。 返回值 [root.val] 3. level task: 左 + 根 + 右\"\"\"\n <|body_0|>\n\n def inorderTraversal2(self, root: TreeNode) -> List[int]:\n \"\"\"思路:非递归, 使用辅助stack,push root.left 直到找到根节点 node,此时放入结果, 使用 临时变量 为 tmp= node.right 注意: 中序遍历的非递归必须熟练掌握,应为 bst 中序遍历的结果为递增序列, 很多变形可由此展开\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def inorderTraversal1(self, root: TreeNode) -> List[int]:\n \"\"\"思路: 递归 1。 终止条件: root 为空 2。 返回值 [root.val] 3. level task: 左 + 根 + 右\"\"\"\n if not root:\n return []\n left = self.inorderTraversal(root.left)\n right = self.inorderTraversal(root.right)\n return left + [root.val] + right\n\n def inorderTraversal2(self, root: TreeNode) -> List[int]:\n \"\"\"思路:非递归, 使用辅助stack,push root.left 直到找到根节点 node,此时放入结果, 使用 临时变量 为 tmp= node.right 注意: 中序遍历的非递归必须熟练掌握,应为 bst 中序遍历的结果为递增序列, 很多变形可由此展开\"\"\"\n stack, path = ([], [])\n while root or stack:\n if root:\n stack.append(root)\n root = root.left\n else:\n node = stack.pop()\n path.append(node.val)\n root = node.right\n return path\n", "source": "the_stack_v2_python_sparse", "source_path": "Week_02/inorder.py", "source_repo": "NanZhang715/AlgorithmCHUNZHAO", "split": "test", "star_events_count": 0} {"blob_id": "f3d83911dc6f77e54213e8fd70ca3547496bc867", "bodies": ["perms = [[]]\nfor n in nums:\n new_perm = []\n for perm in perms:\n for i in range(len(perm) + 1):\n new_perm.append(perm[:i] + [n] + perm[i:])\n if i < len(perm) and n == perm[i]:\n break\n perms = new_perm\nreturn perms", "res = []\nif len(nums) == 0:\n return res\nif len(nums) == 1:\n return [nums]\nnums.sort()\nfor i in range(len(nums)):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in self.permuteUnique(nums[:i] + nums[i + 1:]):\n res.append([nums[i]] + j)\nreturn res"], "bodies_text": "<|body_start_0|>\n perms = [[]]\n for n in nums:\n new_perm = []\n for perm in perms:\n for i in range(len(perm) + 1):\n new_perm.append(perm[:i] + [n] + perm[i:])\n if i < len(perm) and n == perm[i]:\n break\n perms = new_perm\n return perms\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n if len(nums) == 0:\n return res\n if len(nums) == 1:\n return [nums]\n nums.sort()\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in self.permuteUnique(nums[:i] + nums[i + 1:]):\n res.append([nums[i]] + j)\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def permuteUnique1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n perms = [[]]\n for n in nums:\n new_perm = []\n for perm in perms:\n for i in range(len(perm) + 1):\n new_perm.append(perm[:i] + [n] + perm[i:])\n if i < len(perm) and n == perm[i]:\n break\n perms = new_perm\n return perms\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n if len(nums) == 0:\n return res\n if len(nums) == 1:\n return [nums]\n nums.sort()\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in self.permuteUnique(nums[:i] + nums[i + 1:]):\n res.append([nums[i]] + j)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000121", "length_bytes": 1035, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: List[List[int]]", "name": "permuteUnique", "signature": "def permuteUnique(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: List[List[int]]", "name": "permuteUnique1", "signature": "def permuteUnique1(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003248", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permuteUnique(self, nums): :type nums: List[int] :rtype: List[List[int]]\n- def permuteUnique1(self, nums): :type nums: List[int] :rtype: List[List[int]]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def permuteUnique(self, nums): :type nums: List[int] :rtype: List[List[int]]\n- def permuteUnique1(self, nums): :type nums: List[int] :rtype: List[List[int]]\n\n<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def permuteUnique1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n perms = [[]]\n for n in nums:\n new_perm = []\n for perm in perms:\n for i in range(len(perm) + 1):\n new_perm.append(perm[:i] + [n] + perm[i:])\n if i < len(perm) and n == perm[i]:\n break\n perms = new_perm\n return perms\n<|end_body_0|>\n\n<|body_start_1|>\n res = []\n if len(nums) == 0:\n return res\n if len(nums) == 1:\n return [nums]\n nums.sort()\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in self.permuteUnique(nums[:i] + nums[i + 1:]):\n res.append([nums[i]] + j)\n return res\n<|end_body_1|>\n", "revision_id": "863b89be674a82eef60c0f33d726ac08d43f2e01", "skeleton": "<|skeleton|>\nclass Solution:\n\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_0|>\n\n def permuteUnique1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def permuteUnique(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n perms = [[]]\n for n in nums:\n new_perm = []\n for perm in perms:\n for i in range(len(perm) + 1):\n new_perm.append(perm[:i] + [n] + perm[i:])\n if i < len(perm) and n == perm[i]:\n break\n perms = new_perm\n return perms\n\n def permuteUnique1(self, nums):\n \"\"\":type nums: List[int] :rtype: List[List[int]]\"\"\"\n res = []\n if len(nums) == 0:\n return res\n if len(nums) == 1:\n return [nums]\n nums.sort()\n for i in range(len(nums)):\n if i > 0 and nums[i] == nums[i - 1]:\n continue\n for j in self.permuteUnique(nums[:i] + nums[i + 1:]):\n res.append([nums[i]] + j)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "q47_Permutaions_II.py", "source_repo": "Ryuya1995/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "0f8d2ecef4f95c75a59ceaa6267a8841fd9c93e8", "bodies": ["p_list = list(p)\nif isinstance(p[1], dict):\n p[0] = p[1]\n if 'NO' in p_list:\n p[0]['encrypt']['salt'] = False\n elif 'USING' in p_list:\n p[0]['encrypt']['encryption_algorithm'] = p_list[-1]\n elif 'SALT' not in p_list:\n p[0]['encrypt']['integrity_algorithm'] = p_list[-1]\nelse:\n p[0] = {'encrypt': {'salt': True, 'encryption_algorithm': \"'AES192'\", 'integrity_algorithm': 'SHA-1'}}", "p_list = remove_par(list(p))\nparam = {}\nif len(p_list) == 4:\n param = {p_list[2].lower(): p_list[3]}\nif isinstance(p_list[1], dict):\n p[0] = p[1]\nelse:\n p[0] = {}\np[0].update(param)", "p_list = list(p)\np[0] = p[1]\np[0]['storage'] = p_list[-1]"], "bodies_text": "<|body_start_0|>\n p_list = list(p)\n if isinstance(p[1], dict):\n p[0] = p[1]\n if 'NO' in p_list:\n p[0]['encrypt']['salt'] = False\n elif 'USING' in p_list:\n p[0]['encrypt']['encryption_algorithm'] = p_list[-1]\n elif 'SALT' not in p_list:\n p[0]['encrypt']['integrity_algorithm'] = p_list[-1]\n else:\n p[0] = {'encrypt': {'salt': True, 'encryption_algorithm': \"'AES192'\", 'integrity_algorithm': 'SHA-1'}}\n<|end_body_0|>\n\n<|body_start_1|>\n p_list = remove_par(list(p))\n param = {}\n if len(p_list) == 4:\n param = {p_list[2].lower(): p_list[3]}\n if isinstance(p_list[1], dict):\n p[0] = p[1]\n else:\n p[0] = {}\n p[0].update(param)\n<|end_body_1|>\n\n<|body_start_2|>\n p_list = list(p)\n p[0] = p[1]\n p[0]['storage'] = p_list[-1]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Oracle", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Oracle:\n\n def p_encrypt(self, p):\n \"\"\"encrypt : ENCRYPT | encrypt NO SALT | encrypt SALT | encrypt USING STRING | encrypt STRING\"\"\"\n <|body_0|>\n\n def p_storage(self, p):\n \"\"\"storage : STORAGE LP | storage id id | storage id id RP\"\"\"\n <|body_1|>\n\n def p_expr_storage(self, p):\n \"\"\"expr : expr storage\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n p_list = list(p)\n if isinstance(p[1], dict):\n p[0] = p[1]\n if 'NO' in p_list:\n p[0]['encrypt']['salt'] = False\n elif 'USING' in p_list:\n p[0]['encrypt']['encryption_algorithm'] = p_list[-1]\n elif 'SALT' not in p_list:\n p[0]['encrypt']['integrity_algorithm'] = p_list[-1]\n else:\n p[0] = {'encrypt': {'salt': True, 'encryption_algorithm': \"'AES192'\", 'integrity_algorithm': 'SHA-1'}}\n<|end_body_0|>\n\n<|body_start_1|>\n p_list = remove_par(list(p))\n param = {}\n if len(p_list) == 4:\n param = {p_list[2].lower(): p_list[3]}\n if isinstance(p_list[1], dict):\n p[0] = p[1]\n else:\n p[0] = {}\n p[0].update(param)\n<|end_body_1|>\n\n<|body_start_2|>\n p_list = list(p)\n p[0] = p[1]\n p[0]['storage'] = p_list[-1]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000122", "length_bytes": 1438, "license_type": "permissive", "methods": [{"docstring": "encrypt : ENCRYPT | encrypt NO SALT | encrypt SALT | encrypt USING STRING | encrypt STRING", "name": "p_encrypt", "signature": "def p_encrypt(self, p)"}, {"docstring": "storage : STORAGE LP | storage id id | storage id id RP", "name": "p_storage", "signature": "def p_storage(self, p)"}, {"docstring": "expr : expr storage", "name": "p_expr_storage", "signature": "def p_expr_storage(self, p)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_054040", "prompt": "Implement the Python class `Oracle` described below.\n\nClass description:\nImplement the Oracle class.\n\nMethod signatures and docstrings:\n- def p_encrypt(self, p): encrypt : ENCRYPT | encrypt NO SALT | encrypt SALT | encrypt USING STRING | encrypt STRING\n- def p_storage(self, p): storage : STORAGE LP | storage id id | storage id id RP\n- def p_expr_storage(self, p): expr : expr storage", "prompted_full_text": "Implement the Python class `Oracle` described below.\n\nClass description:\nImplement the Oracle class.\n\nMethod signatures and docstrings:\n- def p_encrypt(self, p): encrypt : ENCRYPT | encrypt NO SALT | encrypt SALT | encrypt USING STRING | encrypt STRING\n- def p_storage(self, p): storage : STORAGE LP | storage id id | storage id id RP\n- def p_expr_storage(self, p): expr : expr storage\n\n<|skeleton|>\nclass Oracle:\n\n def p_encrypt(self, p):\n \"\"\"encrypt : ENCRYPT | encrypt NO SALT | encrypt SALT | encrypt USING STRING | encrypt STRING\"\"\"\n <|body_0|>\n\n def p_storage(self, p):\n \"\"\"storage : STORAGE LP | storage id id | storage id id RP\"\"\"\n <|body_1|>\n\n def p_expr_storage(self, p):\n \"\"\"expr : expr storage\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n p_list = list(p)\n if isinstance(p[1], dict):\n p[0] = p[1]\n if 'NO' in p_list:\n p[0]['encrypt']['salt'] = False\n elif 'USING' in p_list:\n p[0]['encrypt']['encryption_algorithm'] = p_list[-1]\n elif 'SALT' not in p_list:\n p[0]['encrypt']['integrity_algorithm'] = p_list[-1]\n else:\n p[0] = {'encrypt': {'salt': True, 'encryption_algorithm': \"'AES192'\", 'integrity_algorithm': 'SHA-1'}}\n<|end_body_0|>\n\n<|body_start_1|>\n p_list = remove_par(list(p))\n param = {}\n if len(p_list) == 4:\n param = {p_list[2].lower(): p_list[3]}\n if isinstance(p_list[1], dict):\n p[0] = p[1]\n else:\n p[0] = {}\n p[0].update(param)\n<|end_body_1|>\n\n<|body_start_2|>\n p_list = list(p)\n p[0] = p[1]\n p[0]['storage'] = p_list[-1]\n<|end_body_2|>\n", "revision_id": "8f69c9c3b58990f0d47dbe868fe4a572d51e2de7", "skeleton": "<|skeleton|>\nclass Oracle:\n\n def p_encrypt(self, p):\n \"\"\"encrypt : ENCRYPT | encrypt NO SALT | encrypt SALT | encrypt USING STRING | encrypt STRING\"\"\"\n <|body_0|>\n\n def p_storage(self, p):\n \"\"\"storage : STORAGE LP | storage id id | storage id id RP\"\"\"\n <|body_1|>\n\n def p_expr_storage(self, p):\n \"\"\"expr : expr storage\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Oracle:\n def p_encrypt(self, p):\n \"\"\"encrypt : ENCRYPT | encrypt NO SALT | encrypt SALT | encrypt USING STRING | encrypt STRING\"\"\"\n p_list = list(p)\n if isinstance(p[1], dict):\n p[0] = p[1]\n if 'NO' in p_list:\n p[0]['encrypt']['salt'] = False\n elif 'USING' in p_list:\n p[0]['encrypt']['encryption_algorithm'] = p_list[-1]\n elif 'SALT' not in p_list:\n p[0]['encrypt']['integrity_algorithm'] = p_list[-1]\n else:\n p[0] = {'encrypt': {'salt': True, 'encryption_algorithm': \"'AES192'\", 'integrity_algorithm': 'SHA-1'}}\n\n def p_storage(self, p):\n \"\"\"storage : STORAGE LP | storage id id | storage id id RP\"\"\"\n p_list = remove_par(list(p))\n param = {}\n if len(p_list) == 4:\n param = {p_list[2].lower(): p_list[3]}\n if isinstance(p_list[1], dict):\n p[0] = p[1]\n else:\n p[0] = {}\n p[0].update(param)\n\n def p_expr_storage(self, p):\n \"\"\"expr : expr storage\"\"\"\n p_list = list(p)\n p[0] = p[1]\n p[0]['storage'] = p_list[-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "simple_ddl_parser/dialects/oracle.py", "source_repo": "bjmc/simple-ddl-parser", "split": "test", "star_events_count": 0} {"blob_id": "1101252e4a0da3f106ed0c7355b191d927e73fc7", "bodies": ["self.api = EnrollmentApi(self.locust.host, self.client)\nself.auto_auth()\nfor course_id in settings.data['COURSE_ID_LIST']:\n self.api.enroll(course_id)", "course_id = random.choice(settings.data['COURSE_ID_LIST'])\ntry:\n self.api.get_user_enrollment_status(self.username, course_id)\nexcept NotAuthorizedException:\n self.auto_auth()", "try:\n self.api.get_student_enrollments()\nexcept NotAuthorizedException:\n self.auto_auth()"], "bodies_text": "<|body_start_0|>\n self.api = EnrollmentApi(self.locust.host, self.client)\n self.auto_auth()\n for course_id in settings.data['COURSE_ID_LIST']:\n self.api.enroll(course_id)\n<|end_body_0|>\n\n<|body_start_1|>\n course_id = random.choice(settings.data['COURSE_ID_LIST'])\n try:\n self.api.get_user_enrollment_status(self.username, course_id)\n except NotAuthorizedException:\n self.auto_auth()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.api.get_student_enrollments()\n except NotAuthorizedException:\n self.auto_auth()\n<|end_body_2|>\n", "class_docstring": "User scripts in which the user is already authenticated and enrolled.", "class_name": "AuthenticatedAndEnrolledTasks", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuthenticatedAndEnrolledTasks:\n \"\"\"User scripts in which the user is already authenticated and enrolled.\"\"\"\n\n def on_start(self):\n \"\"\"Ensure the user is logged in and enrolled.\"\"\"\n <|body_0|>\n\n def user_enrollment_status(self):\n \"\"\"Check a user's enrollment status in a course.\"\"\"\n <|body_1|>\n\n def list_enrollments(self):\n \"\"\"Get all enrollments for a user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.api = EnrollmentApi(self.locust.host, self.client)\n self.auto_auth()\n for course_id in settings.data['COURSE_ID_LIST']:\n self.api.enroll(course_id)\n<|end_body_0|>\n\n<|body_start_1|>\n course_id = random.choice(settings.data['COURSE_ID_LIST'])\n try:\n self.api.get_user_enrollment_status(self.username, course_id)\n except NotAuthorizedException:\n self.auto_auth()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.api.get_student_enrollments()\n except NotAuthorizedException:\n self.auto_auth()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000123", "length_bytes": 8385, "license_type": "permissive", "methods": [{"docstring": "Ensure the user is logged in and enrolled.", "name": "on_start", "signature": "def on_start(self)"}, {"docstring": "Check a user's enrollment status in a course.", "name": "user_enrollment_status", "signature": "def user_enrollment_status(self)"}, {"docstring": "Get all enrollments for a user.", "name": "list_enrollments", "signature": "def list_enrollments(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_023206", "prompt": "Implement the Python class `AuthenticatedAndEnrolledTasks` described below.\n\nClass description:\nUser scripts in which the user is already authenticated and enrolled.\n\nMethod signatures and docstrings:\n- def on_start(self): Ensure the user is logged in and enrolled.\n- def user_enrollment_status(self): Check a user's enrollment status in a course.\n- def list_enrollments(self): Get all enrollments for a user.", "prompted_full_text": "Implement the Python class `AuthenticatedAndEnrolledTasks` described below.\n\nClass description:\nUser scripts in which the user is already authenticated and enrolled.\n\nMethod signatures and docstrings:\n- def on_start(self): Ensure the user is logged in and enrolled.\n- def user_enrollment_status(self): Check a user's enrollment status in a course.\n- def list_enrollments(self): Get all enrollments for a user.\n\n<|skeleton|>\nclass AuthenticatedAndEnrolledTasks:\n \"\"\"User scripts in which the user is already authenticated and enrolled.\"\"\"\n\n def on_start(self):\n \"\"\"Ensure the user is logged in and enrolled.\"\"\"\n <|body_0|>\n\n def user_enrollment_status(self):\n \"\"\"Check a user's enrollment status in a course.\"\"\"\n <|body_1|>\n\n def list_enrollments(self):\n \"\"\"Get all enrollments for a user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.api = EnrollmentApi(self.locust.host, self.client)\n self.auto_auth()\n for course_id in settings.data['COURSE_ID_LIST']:\n self.api.enroll(course_id)\n<|end_body_0|>\n\n<|body_start_1|>\n course_id = random.choice(settings.data['COURSE_ID_LIST'])\n try:\n self.api.get_user_enrollment_status(self.username, course_id)\n except NotAuthorizedException:\n self.auto_auth()\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.api.get_student_enrollments()\n except NotAuthorizedException:\n self.auto_auth()\n<|end_body_2|>\n", "revision_id": "1a6dc891d2fb72575f354521988a531489f30032", "skeleton": "<|skeleton|>\nclass AuthenticatedAndEnrolledTasks:\n \"\"\"User scripts in which the user is already authenticated and enrolled.\"\"\"\n\n def on_start(self):\n \"\"\"Ensure the user is logged in and enrolled.\"\"\"\n <|body_0|>\n\n def user_enrollment_status(self):\n \"\"\"Check a user's enrollment status in a course.\"\"\"\n <|body_1|>\n\n def list_enrollments(self):\n \"\"\"Get all enrollments for a user.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AuthenticatedAndEnrolledTasks:\n \"\"\"User scripts in which the user is already authenticated and enrolled.\"\"\"\n\n def on_start(self):\n \"\"\"Ensure the user is logged in and enrolled.\"\"\"\n self.api = EnrollmentApi(self.locust.host, self.client)\n self.auto_auth()\n for course_id in settings.data['COURSE_ID_LIST']:\n self.api.enroll(course_id)\n\n def user_enrollment_status(self):\n \"\"\"Check a user's enrollment status in a course.\"\"\"\n course_id = random.choice(settings.data['COURSE_ID_LIST'])\n try:\n self.api.get_user_enrollment_status(self.username, course_id)\n except NotAuthorizedException:\n self.auto_auth()\n\n def list_enrollments(self):\n \"\"\"Get all enrollments for a user.\"\"\"\n try:\n self.api.get_student_enrollments()\n except NotAuthorizedException:\n self.auto_auth()\n", "source": "the_stack_v2_python_sparse", "source_path": "loadtests/enrollment/locustfile.py", "source_repo": "kavithachandra/edx-load-tests", "split": "test", "star_events_count": 0} {"blob_id": "ebad75334bd90576ff22dfb83fc07193b10c6ca3", "bodies": ["n = len(prices)\nif n <= 1:\n return 0\npre = 0\nres = 0\nfor i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n pre = cur\nreturn res", "n = len(prices)\nif n <= 1:\n return 0\npre = 0\nres = 0\nleft = [0]\nfor i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n left.append(res)\n pre = cur\npre = 0\nres = 0\nright = [0]\nfor i in range(n - 2, -1, -1):\n cur = prices[i + 1] - prices[i]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n right.insert(0, res)\n pre = cur\nres = max(left[-1], right[0])\nfor i in range(n - 1):\n cur = left[i] + right[i + 1]\n res = max(cur, res)\nreturn res"], "bodies_text": "<|body_start_0|>\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n pre = cur\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n left = [0]\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n left.append(res)\n pre = cur\n pre = 0\n res = 0\n right = [0]\n for i in range(n - 2, -1, -1):\n cur = prices[i + 1] - prices[i]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n right.insert(0, res)\n pre = cur\n res = max(left[-1], right[0])\n for i in range(n - 1):\n cur = left[i] + right[i + 1]\n res = max(cur, res)\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n pre = cur\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n left = [0]\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n left.append(res)\n pre = cur\n pre = 0\n res = 0\n right = [0]\n for i in range(n - 2, -1, -1):\n cur = prices[i + 1] - prices[i]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n right.insert(0, res)\n pre = cur\n res = max(left[-1], right[0])\n for i in range(n - 1):\n cur = left[i] + right[i + 1]\n res = max(cur, res)\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000124", "length_bytes": 1256, "license_type": "no_license", "methods": [{"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfit", "signature": "def maxProfit(self, prices)"}, {"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfit", "signature": "def maxProfit(self, prices)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_021299", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n pre = cur\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n left = [0]\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n left.append(res)\n pre = cur\n pre = 0\n res = 0\n right = [0]\n for i in range(n - 2, -1, -1):\n cur = prices[i + 1] - prices[i]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n right.insert(0, res)\n pre = cur\n res = max(left[-1], right[0])\n for i in range(n - 1):\n cur = left[i] + right[i + 1]\n res = max(cur, res)\n return res\n<|end_body_1|>\n", "revision_id": "176cc1db3291843fb068f06d0180766dd8c3122c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n pre = cur\n return res\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n n = len(prices)\n if n <= 1:\n return 0\n pre = 0\n res = 0\n left = [0]\n for i in range(1, n):\n cur = prices[i] - prices[i - 1]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n left.append(res)\n pre = cur\n pre = 0\n res = 0\n right = [0]\n for i in range(n - 2, -1, -1):\n cur = prices[i + 1] - prices[i]\n if pre > 0:\n cur += pre\n res = max(res, cur)\n right.insert(0, res)\n pre = cur\n res = max(left[-1], right[0])\n for i in range(n - 1):\n cur = left[i] + right[i + 1]\n res = max(cur, res)\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "2019/dynamic_programming/best_time_to_buy_and_sell_stock_121.py", "source_repo": "yehongyu/acode", "split": "test", "star_events_count": 0} {"blob_id": "7bad4c25bcba9a0290f8acaea162e63258a4c34e", "bodies": ["moveit_commander.roscpp_initialize(sys.argv)\nself._parameters = column_parameters\nself.robot = moveit_commander.RobotCommander()\nself.scene = moveit_commander.PlanningSceneInterface()\nself.group = moveit_commander.MoveGroupCommander(self._parameters['Palbator_column_move_group'])\nself.display_trajectory_publisher = rospy.Publisher(self._parameters['display_column_planned_path_topic'], moveit_msgs.msg.DisplayTrajectory, queue_size=10)\nself.minimum_column = self._parameters['minimum_height']\nself.maximum_column = self._parameters['maximum_height']\nrospy.logwarn('{class_name} : COLUMN CONTROLLER ON'.format(class_name=self.__class__.__name__))", "rospy.loginfo('{class_name} : Column move request to position %s'.format(class_name=self.__class__.__name__), pose_name)\nself.group.set_named_target(pose_name)\nplan1 = self.group.plan()\nself.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\nself.display_trajectory.trajectory_start = self.robot.get_current_state()\nself.display_trajectory.trajectory.append(plan1)\nself.display_trajectory_publisher.publish(self.display_trajectory)\nrospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\nself.group.go(wait=True)\nrospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))", "rospy.loginfo('{class_name} : Column move request to coordinate Z %s'.format(class_name=self.__class__.__name__), str(z_target))\ncolumn_pose_target = Pose()\nif z_target < self.minimum_column:\n column_pose_target.position.z = self.minimum_column\nelif z_target > self.maximum_column:\n column_pose_target.position.z = self.maximum_column\nelse:\n column_pose_target.position.z = z_target\ncolumn_pose_target.orientation.w = 1.0\nself.group.set_pose_reference_frame('base_footprint')\nself.group.set_joint_value_target(column_pose_target, True)\nplan1 = self.group.plan()\nself.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\nself.display_trajectory.trajectory_start = self.robot.get_current_state()\nself.display_trajectory.trajectory.append(plan1)\nself.display_trajectory_publisher.publish(self.display_trajectory)\nrospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\nself.group.go(wait=True)\nrospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))"], "bodies_text": "<|body_start_0|>\n moveit_commander.roscpp_initialize(sys.argv)\n self._parameters = column_parameters\n self.robot = moveit_commander.RobotCommander()\n self.scene = moveit_commander.PlanningSceneInterface()\n self.group = moveit_commander.MoveGroupCommander(self._parameters['Palbator_column_move_group'])\n self.display_trajectory_publisher = rospy.Publisher(self._parameters['display_column_planned_path_topic'], moveit_msgs.msg.DisplayTrajectory, queue_size=10)\n self.minimum_column = self._parameters['minimum_height']\n self.maximum_column = self._parameters['maximum_height']\n rospy.logwarn('{class_name} : COLUMN CONTROLLER ON'.format(class_name=self.__class__.__name__))\n<|end_body_0|>\n\n<|body_start_1|>\n rospy.loginfo('{class_name} : Column move request to position %s'.format(class_name=self.__class__.__name__), pose_name)\n self.group.set_named_target(pose_name)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n<|end_body_1|>\n\n<|body_start_2|>\n rospy.loginfo('{class_name} : Column move request to coordinate Z %s'.format(class_name=self.__class__.__name__), str(z_target))\n column_pose_target = Pose()\n if z_target < self.minimum_column:\n column_pose_target.position.z = self.minimum_column\n elif z_target > self.maximum_column:\n column_pose_target.position.z = self.maximum_column\n else:\n column_pose_target.position.z = z_target\n column_pose_target.orientation.w = 1.0\n self.group.set_pose_reference_frame('base_footprint')\n self.group.set_joint_value_target(column_pose_target, True)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "MoveitColumnController", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MoveitColumnController:\n\n def __init__(self, column_parameters):\n \"\"\"Initializes the Palbator Moveit column controller.\"\"\"\n <|body_0|>\n\n def move_column_to_pose(self, pose_name):\n \"\"\"Moves the column in a predefined position during Moveit package's configuration giving its name. :param pose_name: name of the position to reach :type pose_name: string\"\"\"\n <|body_1|>\n\n def move_column(self, z_target):\n \"\"\"Moves the column in a position on Z axis. :param z_target: Coordinate Z of the point to reach :type z_target: float\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n moveit_commander.roscpp_initialize(sys.argv)\n self._parameters = column_parameters\n self.robot = moveit_commander.RobotCommander()\n self.scene = moveit_commander.PlanningSceneInterface()\n self.group = moveit_commander.MoveGroupCommander(self._parameters['Palbator_column_move_group'])\n self.display_trajectory_publisher = rospy.Publisher(self._parameters['display_column_planned_path_topic'], moveit_msgs.msg.DisplayTrajectory, queue_size=10)\n self.minimum_column = self._parameters['minimum_height']\n self.maximum_column = self._parameters['maximum_height']\n rospy.logwarn('{class_name} : COLUMN CONTROLLER ON'.format(class_name=self.__class__.__name__))\n<|end_body_0|>\n\n<|body_start_1|>\n rospy.loginfo('{class_name} : Column move request to position %s'.format(class_name=self.__class__.__name__), pose_name)\n self.group.set_named_target(pose_name)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n<|end_body_1|>\n\n<|body_start_2|>\n rospy.loginfo('{class_name} : Column move request to coordinate Z %s'.format(class_name=self.__class__.__name__), str(z_target))\n column_pose_target = Pose()\n if z_target < self.minimum_column:\n column_pose_target.position.z = self.minimum_column\n elif z_target > self.maximum_column:\n column_pose_target.position.z = self.maximum_column\n else:\n column_pose_target.position.z = z_target\n column_pose_target.orientation.w = 1.0\n self.group.set_pose_reference_frame('base_footprint')\n self.group.set_joint_value_target(column_pose_target, True)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000125", "length_bytes": 3805, "license_type": "no_license", "methods": [{"docstring": "Initializes the Palbator Moveit column controller.", "name": "__init__", "signature": "def __init__(self, column_parameters)"}, {"docstring": "Moves the column in a predefined position during Moveit package's configuration giving its name. :param pose_name: name of the position to reach :type pose_name: string", "name": "move_column_to_pose", "signature": "def move_column_to_pose(self, pose_name)"}, {"docstring": "Moves the column in a position on Z axis. :param z_target: Coordinate Z of the point to reach :type z_target: float", "name": "move_column", "signature": "def move_column(self, z_target)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_031029", "prompt": "Implement the Python class `MoveitColumnController` described below.\n\nClass description:\nImplement the MoveitColumnController class.\n\nMethod signatures and docstrings:\n- def __init__(self, column_parameters): Initializes the Palbator Moveit column controller.\n- def move_column_to_pose(self, pose_name): Moves the column in a predefined position during Moveit package's configuration giving its name. :param pose_name: name of the position to reach :type pose_name: string\n- def move_column(self, z_target): Moves the column in a position on Z axis. :param z_target: Coordinate Z of the point to reach :type z_target: float", "prompted_full_text": "Implement the Python class `MoveitColumnController` described below.\n\nClass description:\nImplement the MoveitColumnController class.\n\nMethod signatures and docstrings:\n- def __init__(self, column_parameters): Initializes the Palbator Moveit column controller.\n- def move_column_to_pose(self, pose_name): Moves the column in a predefined position during Moveit package's configuration giving its name. :param pose_name: name of the position to reach :type pose_name: string\n- def move_column(self, z_target): Moves the column in a position on Z axis. :param z_target: Coordinate Z of the point to reach :type z_target: float\n\n<|skeleton|>\nclass MoveitColumnController:\n\n def __init__(self, column_parameters):\n \"\"\"Initializes the Palbator Moveit column controller.\"\"\"\n <|body_0|>\n\n def move_column_to_pose(self, pose_name):\n \"\"\"Moves the column in a predefined position during Moveit package's configuration giving its name. :param pose_name: name of the position to reach :type pose_name: string\"\"\"\n <|body_1|>\n\n def move_column(self, z_target):\n \"\"\"Moves the column in a position on Z axis. :param z_target: Coordinate Z of the point to reach :type z_target: float\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n moveit_commander.roscpp_initialize(sys.argv)\n self._parameters = column_parameters\n self.robot = moveit_commander.RobotCommander()\n self.scene = moveit_commander.PlanningSceneInterface()\n self.group = moveit_commander.MoveGroupCommander(self._parameters['Palbator_column_move_group'])\n self.display_trajectory_publisher = rospy.Publisher(self._parameters['display_column_planned_path_topic'], moveit_msgs.msg.DisplayTrajectory, queue_size=10)\n self.minimum_column = self._parameters['minimum_height']\n self.maximum_column = self._parameters['maximum_height']\n rospy.logwarn('{class_name} : COLUMN CONTROLLER ON'.format(class_name=self.__class__.__name__))\n<|end_body_0|>\n\n<|body_start_1|>\n rospy.loginfo('{class_name} : Column move request to position %s'.format(class_name=self.__class__.__name__), pose_name)\n self.group.set_named_target(pose_name)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n<|end_body_1|>\n\n<|body_start_2|>\n rospy.loginfo('{class_name} : Column move request to coordinate Z %s'.format(class_name=self.__class__.__name__), str(z_target))\n column_pose_target = Pose()\n if z_target < self.minimum_column:\n column_pose_target.position.z = self.minimum_column\n elif z_target > self.maximum_column:\n column_pose_target.position.z = self.maximum_column\n else:\n column_pose_target.position.z = z_target\n column_pose_target.orientation.w = 1.0\n self.group.set_pose_reference_frame('base_footprint')\n self.group.set_joint_value_target(column_pose_target, True)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n<|end_body_2|>\n", "revision_id": "42748e47a03ea3a40593b0b43908bb7b239c7ed7", "skeleton": "<|skeleton|>\nclass MoveitColumnController:\n\n def __init__(self, column_parameters):\n \"\"\"Initializes the Palbator Moveit column controller.\"\"\"\n <|body_0|>\n\n def move_column_to_pose(self, pose_name):\n \"\"\"Moves the column in a predefined position during Moveit package's configuration giving its name. :param pose_name: name of the position to reach :type pose_name: string\"\"\"\n <|body_1|>\n\n def move_column(self, z_target):\n \"\"\"Moves the column in a position on Z axis. :param z_target: Coordinate Z of the point to reach :type z_target: float\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MoveitColumnController:\n def __init__(self, column_parameters):\n \"\"\"Initializes the Palbator Moveit column controller.\"\"\"\n moveit_commander.roscpp_initialize(sys.argv)\n self._parameters = column_parameters\n self.robot = moveit_commander.RobotCommander()\n self.scene = moveit_commander.PlanningSceneInterface()\n self.group = moveit_commander.MoveGroupCommander(self._parameters['Palbator_column_move_group'])\n self.display_trajectory_publisher = rospy.Publisher(self._parameters['display_column_planned_path_topic'], moveit_msgs.msg.DisplayTrajectory, queue_size=10)\n self.minimum_column = self._parameters['minimum_height']\n self.maximum_column = self._parameters['maximum_height']\n rospy.logwarn('{class_name} : COLUMN CONTROLLER ON'.format(class_name=self.__class__.__name__))\n\n def move_column_to_pose(self, pose_name):\n \"\"\"Moves the column in a predefined position during Moveit package's configuration giving its name. :param pose_name: name of the position to reach :type pose_name: string\"\"\"\n rospy.loginfo('{class_name} : Column move request to position %s'.format(class_name=self.__class__.__name__), pose_name)\n self.group.set_named_target(pose_name)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n\n def move_column(self, z_target):\n \"\"\"Moves the column in a position on Z axis. :param z_target: Coordinate Z of the point to reach :type z_target: float\"\"\"\n rospy.loginfo('{class_name} : Column move request to coordinate Z %s'.format(class_name=self.__class__.__name__), str(z_target))\n column_pose_target = Pose()\n if z_target < self.minimum_column:\n column_pose_target.position.z = self.minimum_column\n elif z_target > self.maximum_column:\n column_pose_target.position.z = self.maximum_column\n else:\n column_pose_target.position.z = z_target\n column_pose_target.orientation.w = 1.0\n self.group.set_pose_reference_frame('base_footprint')\n self.group.set_joint_value_target(column_pose_target, True)\n plan1 = self.group.plan()\n self.display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n self.display_trajectory.trajectory_start = self.robot.get_current_state()\n self.display_trajectory.trajectory.append(plan1)\n self.display_trajectory_publisher.publish(self.display_trajectory)\n rospy.loginfo('{class_name} : Moving column'.format(class_name=self.__class__.__name__))\n self.group.go(wait=True)\n rospy.loginfo('{class_name} : Column position reached'.format(class_name=self.__class__.__name__))\n", "source": "the_stack_v2_python_sparse", "source_path": "pmb2_robot/pmb2_apps/scripts/moveit_column_controller.py", "source_repo": "CureThomas/Palbator_simulation", "split": "test", "star_events_count": 0} {"blob_id": "138eb3ab5e261f8bc5ec13406a1c5a1a472aa0a6", "bodies": ["self.application_id_local = kwargs.pop('id')\nself.adult = kwargs.pop('adult')\nself.name = kwargs.pop('name')\nsuper(OtherPeopleAdultDBSForm, self).__init__(*args, **kwargs)\nfull_stop_stripper(self)\nif AdultInHome.objects.filter(application_id=self.application_id_local, adult=self.adult).count() > 0:\n adult_record = AdultInHome.objects.get(application_id=self.application_id_local, adult=self.adult)\n self.fields['dbs_certificate_number'].initial = adult_record.dbs_certificate_number\n self.pk = adult_record.adult_id\n self.field_list = ['dbs_certificate_number']", "dbs_certification_key = str(self.prefix) + '-dbs_certificate_number'\ndbs_certificate_number = self.data[dbs_certification_key]\nif len(str(dbs_certificate_number)) > 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\nif len(str(dbs_certificate_number)) < 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\nform_check = household_member_dbs_form_duplicates_check(self.data)\nif not form_check.dbs_numbers_unique:\n if self.adult in form_check.duplicate_entry_indexes:\n logger.debug('Received following form data for other adults DBS entries:' + str(self.data))\n logger.debug('Marking adult ' + str(self.adult) + ' DBS number as duplicate')\n self.add_error('dbs_certificate_number', 'Please enter a different DBS number for each person')\napplication_id = self.data['id']\napplication = Application.objects.get(pk=application_id)\nhousehold_member_dbs_to_test = self['dbs_certificate_number'].data\nchildminder_dbs_check = childminder_dbs_number_duplication_check(application, household_member_dbs_to_test)\nif childminder_dbs_check.duplicates_childminder_dbs:\n self.add_error('dbs_certificate_number', 'Please enter a DBS number that is different from your own')\nreturn dbs_certificate_number"], "bodies_text": "<|body_start_0|>\n self.application_id_local = kwargs.pop('id')\n self.adult = kwargs.pop('adult')\n self.name = kwargs.pop('name')\n super(OtherPeopleAdultDBSForm, self).__init__(*args, **kwargs)\n full_stop_stripper(self)\n if AdultInHome.objects.filter(application_id=self.application_id_local, adult=self.adult).count() > 0:\n adult_record = AdultInHome.objects.get(application_id=self.application_id_local, adult=self.adult)\n self.fields['dbs_certificate_number'].initial = adult_record.dbs_certificate_number\n self.pk = adult_record.adult_id\n self.field_list = ['dbs_certificate_number']\n<|end_body_0|>\n\n<|body_start_1|>\n dbs_certification_key = str(self.prefix) + '-dbs_certificate_number'\n dbs_certificate_number = self.data[dbs_certification_key]\n if len(str(dbs_certificate_number)) > 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n if len(str(dbs_certificate_number)) < 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n form_check = household_member_dbs_form_duplicates_check(self.data)\n if not form_check.dbs_numbers_unique:\n if self.adult in form_check.duplicate_entry_indexes:\n logger.debug('Received following form data for other adults DBS entries:' + str(self.data))\n logger.debug('Marking adult ' + str(self.adult) + ' DBS number as duplicate')\n self.add_error('dbs_certificate_number', 'Please enter a different DBS number for each person')\n application_id = self.data['id']\n application = Application.objects.get(pk=application_id)\n household_member_dbs_to_test = self['dbs_certificate_number'].data\n childminder_dbs_check = childminder_dbs_number_duplication_check(application, household_member_dbs_to_test)\n if childminder_dbs_check.duplicates_childminder_dbs:\n self.add_error('dbs_certificate_number', 'Please enter a DBS number that is different from your own')\n return dbs_certificate_number\n<|end_body_1|>\n", "class_docstring": "GOV.UK form for the People in your home: adult DBS page", "class_name": "OtherPeopleAdultDBSForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OtherPeopleAdultDBSForm:\n \"\"\"GOV.UK form for the People in your home: adult DBS page\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method to configure the initialisation of the People in your home: adult DBS form :param args: arguments passed to the form :param kwargs: keyword arguments passed to the form, e.g. application ID\"\"\"\n <|body_0|>\n\n def clean_dbs_certificate_number(self):\n \"\"\"DBS certificate number validation :return: integer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.application_id_local = kwargs.pop('id')\n self.adult = kwargs.pop('adult')\n self.name = kwargs.pop('name')\n super(OtherPeopleAdultDBSForm, self).__init__(*args, **kwargs)\n full_stop_stripper(self)\n if AdultInHome.objects.filter(application_id=self.application_id_local, adult=self.adult).count() > 0:\n adult_record = AdultInHome.objects.get(application_id=self.application_id_local, adult=self.adult)\n self.fields['dbs_certificate_number'].initial = adult_record.dbs_certificate_number\n self.pk = adult_record.adult_id\n self.field_list = ['dbs_certificate_number']\n<|end_body_0|>\n\n<|body_start_1|>\n dbs_certification_key = str(self.prefix) + '-dbs_certificate_number'\n dbs_certificate_number = self.data[dbs_certification_key]\n if len(str(dbs_certificate_number)) > 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n if len(str(dbs_certificate_number)) < 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n form_check = household_member_dbs_form_duplicates_check(self.data)\n if not form_check.dbs_numbers_unique:\n if self.adult in form_check.duplicate_entry_indexes:\n logger.debug('Received following form data for other adults DBS entries:' + str(self.data))\n logger.debug('Marking adult ' + str(self.adult) + ' DBS number as duplicate')\n self.add_error('dbs_certificate_number', 'Please enter a different DBS number for each person')\n application_id = self.data['id']\n application = Application.objects.get(pk=application_id)\n household_member_dbs_to_test = self['dbs_certificate_number'].data\n childminder_dbs_check = childminder_dbs_number_duplication_check(application, household_member_dbs_to_test)\n if childminder_dbs_check.duplicates_childminder_dbs:\n self.add_error('dbs_certificate_number', 'Please enter a DBS number that is different from your own')\n return dbs_certificate_number\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000126", "length_bytes": 20631, "license_type": "no_license", "methods": [{"docstring": "Method to configure the initialisation of the People in your home: adult DBS form :param args: arguments passed to the form :param kwargs: keyword arguments passed to the form, e.g. application ID", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "DBS certificate number validation :return: integer", "name": "clean_dbs_certificate_number", "signature": "def clean_dbs_certificate_number(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020257", "prompt": "Implement the Python class `OtherPeopleAdultDBSForm` described below.\n\nClass description:\nGOV.UK form for the People in your home: adult DBS page\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Method to configure the initialisation of the People in your home: adult DBS form :param args: arguments passed to the form :param kwargs: keyword arguments passed to the form, e.g. application ID\n- def clean_dbs_certificate_number(self): DBS certificate number validation :return: integer", "prompted_full_text": "Implement the Python class `OtherPeopleAdultDBSForm` described below.\n\nClass description:\nGOV.UK form for the People in your home: adult DBS page\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Method to configure the initialisation of the People in your home: adult DBS form :param args: arguments passed to the form :param kwargs: keyword arguments passed to the form, e.g. application ID\n- def clean_dbs_certificate_number(self): DBS certificate number validation :return: integer\n\n<|skeleton|>\nclass OtherPeopleAdultDBSForm:\n \"\"\"GOV.UK form for the People in your home: adult DBS page\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method to configure the initialisation of the People in your home: adult DBS form :param args: arguments passed to the form :param kwargs: keyword arguments passed to the form, e.g. application ID\"\"\"\n <|body_0|>\n\n def clean_dbs_certificate_number(self):\n \"\"\"DBS certificate number validation :return: integer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.application_id_local = kwargs.pop('id')\n self.adult = kwargs.pop('adult')\n self.name = kwargs.pop('name')\n super(OtherPeopleAdultDBSForm, self).__init__(*args, **kwargs)\n full_stop_stripper(self)\n if AdultInHome.objects.filter(application_id=self.application_id_local, adult=self.adult).count() > 0:\n adult_record = AdultInHome.objects.get(application_id=self.application_id_local, adult=self.adult)\n self.fields['dbs_certificate_number'].initial = adult_record.dbs_certificate_number\n self.pk = adult_record.adult_id\n self.field_list = ['dbs_certificate_number']\n<|end_body_0|>\n\n<|body_start_1|>\n dbs_certification_key = str(self.prefix) + '-dbs_certificate_number'\n dbs_certificate_number = self.data[dbs_certification_key]\n if len(str(dbs_certificate_number)) > 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n if len(str(dbs_certificate_number)) < 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n form_check = household_member_dbs_form_duplicates_check(self.data)\n if not form_check.dbs_numbers_unique:\n if self.adult in form_check.duplicate_entry_indexes:\n logger.debug('Received following form data for other adults DBS entries:' + str(self.data))\n logger.debug('Marking adult ' + str(self.adult) + ' DBS number as duplicate')\n self.add_error('dbs_certificate_number', 'Please enter a different DBS number for each person')\n application_id = self.data['id']\n application = Application.objects.get(pk=application_id)\n household_member_dbs_to_test = self['dbs_certificate_number'].data\n childminder_dbs_check = childminder_dbs_number_duplication_check(application, household_member_dbs_to_test)\n if childminder_dbs_check.duplicates_childminder_dbs:\n self.add_error('dbs_certificate_number', 'Please enter a DBS number that is different from your own')\n return dbs_certificate_number\n<|end_body_1|>\n", "revision_id": "fa6ca6a8164763e1dfe1581702ca5d36e44859de", "skeleton": "<|skeleton|>\nclass OtherPeopleAdultDBSForm:\n \"\"\"GOV.UK form for the People in your home: adult DBS page\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method to configure the initialisation of the People in your home: adult DBS form :param args: arguments passed to the form :param kwargs: keyword arguments passed to the form, e.g. application ID\"\"\"\n <|body_0|>\n\n def clean_dbs_certificate_number(self):\n \"\"\"DBS certificate number validation :return: integer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OtherPeopleAdultDBSForm:\n \"\"\"GOV.UK form for the People in your home: adult DBS page\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Method to configure the initialisation of the People in your home: adult DBS form :param args: arguments passed to the form :param kwargs: keyword arguments passed to the form, e.g. application ID\"\"\"\n self.application_id_local = kwargs.pop('id')\n self.adult = kwargs.pop('adult')\n self.name = kwargs.pop('name')\n super(OtherPeopleAdultDBSForm, self).__init__(*args, **kwargs)\n full_stop_stripper(self)\n if AdultInHome.objects.filter(application_id=self.application_id_local, adult=self.adult).count() > 0:\n adult_record = AdultInHome.objects.get(application_id=self.application_id_local, adult=self.adult)\n self.fields['dbs_certificate_number'].initial = adult_record.dbs_certificate_number\n self.pk = adult_record.adult_id\n self.field_list = ['dbs_certificate_number']\n\n def clean_dbs_certificate_number(self):\n \"\"\"DBS certificate number validation :return: integer\"\"\"\n dbs_certification_key = str(self.prefix) + '-dbs_certificate_number'\n dbs_certificate_number = self.data[dbs_certification_key]\n if len(str(dbs_certificate_number)) > 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n if len(str(dbs_certificate_number)) < 12:\n raise forms.ValidationError('The certificate number should be 12 digits long')\n form_check = household_member_dbs_form_duplicates_check(self.data)\n if not form_check.dbs_numbers_unique:\n if self.adult in form_check.duplicate_entry_indexes:\n logger.debug('Received following form data for other adults DBS entries:' + str(self.data))\n logger.debug('Marking adult ' + str(self.adult) + ' DBS number as duplicate')\n self.add_error('dbs_certificate_number', 'Please enter a different DBS number for each person')\n application_id = self.data['id']\n application = Application.objects.get(pk=application_id)\n household_member_dbs_to_test = self['dbs_certificate_number'].data\n childminder_dbs_check = childminder_dbs_number_duplication_check(application, household_member_dbs_to_test)\n if childminder_dbs_check.duplicates_childminder_dbs:\n self.add_error('dbs_certificate_number', 'Please enter a DBS number that is different from your own')\n return dbs_certificate_number\n", "source": "the_stack_v2_python_sparse", "source_path": "application/forms/other_people.py", "source_repo": "IS-JAQU-CAZ/OFS-MORE-Childminder-Website", "split": "test", "star_events_count": 0} {"blob_id": "53fd97c79f227ff2cbcd419262bd41924a4b9913", "bodies": ["base_wrappers = [ProcStat(), ProcSwaps(), ProcUptime(), ProcVersion(), ProcBuddyInfo(), ProcCpuInfo(), ProcMemInfo(), ProcFileSystems(), ProcCrypto(), ProcInterrupts(), ProcPartitions(), ProcModules()]\nfor _file in base_wrappers:\n _file.dump()", "pid_wrappers = [ProcCmdline(pid), ProcEnviron(pid), ProcStack(pid), ProcStatus(pid), ProcOomScore(pid), ProcMaps(pid), ProcIO(pid), ProcMounts(pid), ProcFdInfo(pid)]\nfor _file in pid_wrappers:\n _file.dump()", "net_wrappers = [ProcARP(), ProcNetDev(), ProcProtocols()]\nfor _file in net_wrappers:\n _file.dump()", "sys_wrappers = [ProcVSyscall(), ProcFileNR(), ProcInodeNR(), ProcDumpable(), ProcPidMax(), ProcThreadMax()]\nfor _file in sys_wrappers:\n _file.dump()"], "bodies_text": "<|body_start_0|>\n base_wrappers = [ProcStat(), ProcSwaps(), ProcUptime(), ProcVersion(), ProcBuddyInfo(), ProcCpuInfo(), ProcMemInfo(), ProcFileSystems(), ProcCrypto(), ProcInterrupts(), ProcPartitions(), ProcModules()]\n for _file in base_wrappers:\n _file.dump()\n<|end_body_0|>\n\n<|body_start_1|>\n pid_wrappers = [ProcCmdline(pid), ProcEnviron(pid), ProcStack(pid), ProcStatus(pid), ProcOomScore(pid), ProcMaps(pid), ProcIO(pid), ProcMounts(pid), ProcFdInfo(pid)]\n for _file in pid_wrappers:\n _file.dump()\n<|end_body_1|>\n\n<|body_start_2|>\n net_wrappers = [ProcARP(), ProcNetDev(), ProcProtocols()]\n for _file in net_wrappers:\n _file.dump()\n<|end_body_2|>\n\n<|body_start_3|>\n sys_wrappers = [ProcVSyscall(), ProcFileNR(), ProcInodeNR(), ProcDumpable(), ProcPidMax(), ProcThreadMax()]\n for _file in sys_wrappers:\n _file.dump()\n<|end_body_3|>\n", "class_docstring": "Class manages the list of supported /proc sub-files we have classes for. As well as printing the information contained in them to stdout.", "class_name": "ProcDirectory", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProcDirectory:\n \"\"\"Class manages the list of supported /proc sub-files we have classes for. As well as printing the information contained in them to stdout.\"\"\"\n\n def dump_base(self):\n \"\"\"Creates instances of all the file abstractions from the root /proc directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_0|>\n\n def dump_proc(self, pid):\n \"\"\"Creates instances of all the file abstractions from the /proc/[pid] directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_1|>\n\n def dump_net(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/net directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_2|>\n\n def dump_sys(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/sys directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base_wrappers = [ProcStat(), ProcSwaps(), ProcUptime(), ProcVersion(), ProcBuddyInfo(), ProcCpuInfo(), ProcMemInfo(), ProcFileSystems(), ProcCrypto(), ProcInterrupts(), ProcPartitions(), ProcModules()]\n for _file in base_wrappers:\n _file.dump()\n<|end_body_0|>\n\n<|body_start_1|>\n pid_wrappers = [ProcCmdline(pid), ProcEnviron(pid), ProcStack(pid), ProcStatus(pid), ProcOomScore(pid), ProcMaps(pid), ProcIO(pid), ProcMounts(pid), ProcFdInfo(pid)]\n for _file in pid_wrappers:\n _file.dump()\n<|end_body_1|>\n\n<|body_start_2|>\n net_wrappers = [ProcARP(), ProcNetDev(), ProcProtocols()]\n for _file in net_wrappers:\n _file.dump()\n<|end_body_2|>\n\n<|body_start_3|>\n sys_wrappers = [ProcVSyscall(), ProcFileNR(), ProcInodeNR(), ProcDumpable(), ProcPidMax(), ProcThreadMax()]\n for _file in sys_wrappers:\n _file.dump()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000127", "length_bytes": 4315, "license_type": "permissive", "methods": [{"docstring": "Creates instances of all the file abstractions from the root /proc directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.", "name": "dump_base", "signature": "def dump_base(self)"}, {"docstring": "Creates instances of all the file abstractions from the /proc/[pid] directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.", "name": "dump_proc", "signature": "def dump_proc(self, pid)"}, {"docstring": "Creates instances of all the file abstractions from the /proc/net directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.", "name": "dump_net", "signature": "def dump_net(self)"}, {"docstring": "Creates instances of all the file abstractions from the /proc/sys directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.", "name": "dump_sys", "signature": "def dump_sys(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_053264", "prompt": "Implement the Python class `ProcDirectory` described below.\n\nClass description:\nClass manages the list of supported /proc sub-files we have classes for. As well as printing the information contained in them to stdout.\n\nMethod signatures and docstrings:\n- def dump_base(self): Creates instances of all the file abstractions from the root /proc directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\n- def dump_proc(self, pid): Creates instances of all the file abstractions from the /proc/[pid] directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\n- def dump_net(self): Creates instances of all the file abstractions from the /proc/net directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\n- def dump_sys(self): Creates instances of all the file abstractions from the /proc/sys directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.", "prompted_full_text": "Implement the Python class `ProcDirectory` described below.\n\nClass description:\nClass manages the list of supported /proc sub-files we have classes for. As well as printing the information contained in them to stdout.\n\nMethod signatures and docstrings:\n- def dump_base(self): Creates instances of all the file abstractions from the root /proc directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\n- def dump_proc(self, pid): Creates instances of all the file abstractions from the /proc/[pid] directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\n- def dump_net(self): Creates instances of all the file abstractions from the /proc/net directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\n- def dump_sys(self): Creates instances of all the file abstractions from the /proc/sys directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\n\n<|skeleton|>\nclass ProcDirectory:\n \"\"\"Class manages the list of supported /proc sub-files we have classes for. As well as printing the information contained in them to stdout.\"\"\"\n\n def dump_base(self):\n \"\"\"Creates instances of all the file abstractions from the root /proc directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_0|>\n\n def dump_proc(self, pid):\n \"\"\"Creates instances of all the file abstractions from the /proc/[pid] directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_1|>\n\n def dump_net(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/net directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_2|>\n\n def dump_sys(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/sys directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n base_wrappers = [ProcStat(), ProcSwaps(), ProcUptime(), ProcVersion(), ProcBuddyInfo(), ProcCpuInfo(), ProcMemInfo(), ProcFileSystems(), ProcCrypto(), ProcInterrupts(), ProcPartitions(), ProcModules()]\n for _file in base_wrappers:\n _file.dump()\n<|end_body_0|>\n\n<|body_start_1|>\n pid_wrappers = [ProcCmdline(pid), ProcEnviron(pid), ProcStack(pid), ProcStatus(pid), ProcOomScore(pid), ProcMaps(pid), ProcIO(pid), ProcMounts(pid), ProcFdInfo(pid)]\n for _file in pid_wrappers:\n _file.dump()\n<|end_body_1|>\n\n<|body_start_2|>\n net_wrappers = [ProcARP(), ProcNetDev(), ProcProtocols()]\n for _file in net_wrappers:\n _file.dump()\n<|end_body_2|>\n\n<|body_start_3|>\n sys_wrappers = [ProcVSyscall(), ProcFileNR(), ProcInodeNR(), ProcDumpable(), ProcPidMax(), ProcThreadMax()]\n for _file in sys_wrappers:\n _file.dump()\n<|end_body_3|>\n", "revision_id": "5fc781852dcdf55c3a807e97692224a28c0913f6", "skeleton": "<|skeleton|>\nclass ProcDirectory:\n \"\"\"Class manages the list of supported /proc sub-files we have classes for. As well as printing the information contained in them to stdout.\"\"\"\n\n def dump_base(self):\n \"\"\"Creates instances of all the file abstractions from the root /proc directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_0|>\n\n def dump_proc(self, pid):\n \"\"\"Creates instances of all the file abstractions from the /proc/[pid] directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_1|>\n\n def dump_net(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/net directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_2|>\n\n def dump_sys(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/sys directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProcDirectory:\n \"\"\"Class manages the list of supported /proc sub-files we have classes for. As well as printing the information contained in them to stdout.\"\"\"\n\n def dump_base(self):\n \"\"\"Creates instances of all the file abstractions from the root /proc directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n base_wrappers = [ProcStat(), ProcSwaps(), ProcUptime(), ProcVersion(), ProcBuddyInfo(), ProcCpuInfo(), ProcMemInfo(), ProcFileSystems(), ProcCrypto(), ProcInterrupts(), ProcPartitions(), ProcModules()]\n for _file in base_wrappers:\n _file.dump()\n\n def dump_proc(self, pid):\n \"\"\"Creates instances of all the file abstractions from the /proc/[pid] directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n pid_wrappers = [ProcCmdline(pid), ProcEnviron(pid), ProcStack(pid), ProcStatus(pid), ProcOomScore(pid), ProcMaps(pid), ProcIO(pid), ProcMounts(pid), ProcFdInfo(pid)]\n for _file in pid_wrappers:\n _file.dump()\n\n def dump_net(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/net directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n net_wrappers = [ProcARP(), ProcNetDev(), ProcProtocols()]\n for _file in net_wrappers:\n _file.dump()\n\n def dump_sys(self):\n \"\"\"Creates instances of all the file abstractions from the /proc/sys directory, inheriting from procBase. Then iterates over all files and prints their details to stdout using overridden procBase dump() function.\"\"\"\n sys_wrappers = [ProcVSyscall(), ProcFileNR(), ProcInodeNR(), ProcDumpable(), ProcPidMax(), ProcThreadMax()]\n for _file in sys_wrappers:\n _file.dump()\n", "source": "the_stack_v2_python_sparse", "source_path": "proc_scraper/proc_directory.py", "source_repo": "EwanC/pyProc", "split": "test", "star_events_count": 0} {"blob_id": "1be87abcd57d18a027344369f660b695f36e5444", "bodies": ["user = User.query.filter_by(username=username.data).first()\nif user:\n raise ValidationError('The username is used already')", "email = User.query.filter_by(email=email.data).first()\nif email:\n raise ValidationError('The email is used already')"], "bodies_text": "<|body_start_0|>\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('The username is used already')\n<|end_body_0|>\n\n<|body_start_1|>\n email = User.query.filter_by(email=email.data).first()\n if email:\n raise ValidationError('The email is used already')\n<|end_body_1|>\n", "class_docstring": "Form for user site", "class_name": "RegistrationForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegistrationForm:\n \"\"\"Form for user site\"\"\"\n\n def validate_username(self, username):\n \"\"\"To validate the username if it is already exist :param username: username on the form :return: if username submitted form is already exist, return error\"\"\"\n <|body_0|>\n\n def validate_email(self, email):\n \"\"\"To validate the email if it is already exist :param email: email on the form :return: if email submitted form is already exist, return error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('The username is used already')\n<|end_body_0|>\n\n<|body_start_1|>\n email = User.query.filter_by(email=email.data).first()\n if email:\n raise ValidationError('The email is used already')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000128", "length_bytes": 4874, "license_type": "no_license", "methods": [{"docstring": "To validate the username if it is already exist :param username: username on the form :return: if username submitted form is already exist, return error", "name": "validate_username", "signature": "def validate_username(self, username)"}, {"docstring": "To validate the email if it is already exist :param email: email on the form :return: if email submitted form is already exist, return error", "name": "validate_email", "signature": "def validate_email(self, email)"}], "n_methods": 2, "prompt": "Implement the Python class `RegistrationForm` described below.\n\nClass description:\nForm for user site\n\nMethod signatures and docstrings:\n- def validate_username(self, username): To validate the username if it is already exist :param username: username on the form :return: if username submitted form is already exist, return error\n- def validate_email(self, email): To validate the email if it is already exist :param email: email on the form :return: if email submitted form is already exist, return error", "prompted_full_text": "Implement the Python class `RegistrationForm` described below.\n\nClass description:\nForm for user site\n\nMethod signatures and docstrings:\n- def validate_username(self, username): To validate the username if it is already exist :param username: username on the form :return: if username submitted form is already exist, return error\n- def validate_email(self, email): To validate the email if it is already exist :param email: email on the form :return: if email submitted form is already exist, return error\n\n<|skeleton|>\nclass RegistrationForm:\n \"\"\"Form for user site\"\"\"\n\n def validate_username(self, username):\n \"\"\"To validate the username if it is already exist :param username: username on the form :return: if username submitted form is already exist, return error\"\"\"\n <|body_0|>\n\n def validate_email(self, email):\n \"\"\"To validate the email if it is already exist :param email: email on the form :return: if email submitted form is already exist, return error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('The username is used already')\n<|end_body_0|>\n\n<|body_start_1|>\n email = User.query.filter_by(email=email.data).first()\n if email:\n raise ValidationError('The email is used already')\n<|end_body_1|>\n", "revision_id": "34ac809218ff2a2a8f0ef2772c5702fbb4503e8f", "skeleton": "<|skeleton|>\nclass RegistrationForm:\n \"\"\"Form for user site\"\"\"\n\n def validate_username(self, username):\n \"\"\"To validate the username if it is already exist :param username: username on the form :return: if username submitted form is already exist, return error\"\"\"\n <|body_0|>\n\n def validate_email(self, email):\n \"\"\"To validate the email if it is already exist :param email: email on the form :return: if email submitted form is already exist, return error\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RegistrationForm:\n \"\"\"Form for user site\"\"\"\n\n def validate_username(self, username):\n \"\"\"To validate the username if it is already exist :param username: username on the form :return: if username submitted form is already exist, return error\"\"\"\n user = User.query.filter_by(username=username.data).first()\n if user:\n raise ValidationError('The username is used already')\n\n def validate_email(self, email):\n \"\"\"To validate the email if it is already exist :param email: email on the form :return: if email submitted form is already exist, return error\"\"\"\n email = User.query.filter_by(email=email.data).first()\n if email:\n raise ValidationError('The email is used already')\n", "source": "the_stack_v2_python_sparse", "source_path": "docmanage/users/forms.py", "source_repo": "gadenahi/doc_manage", "split": "test", "star_events_count": 0} {"blob_id": "4ea61d473c4329c7de3557c3591a9985f3f16862", "bodies": ["user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\nserializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, context={'request': request})\ndata = serializer.data\ndata['state'] = serializer.data['state']\nif data['workflowcollectionengagementdetail_set'] and data['workflowcollectionengagementdetail_set'][-1]['user_responses'] and ('inputs' in data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1].keys()):\n user_inputs = data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1]['inputs']\n print('Something to evaluate')\n checker = [entry['is_valid'] for entry in user_inputs]\n data['state']['proceed'] = False if False in checker else True\nelse:\n data['state']['proceed'] = True\nreturn Response(data=data)", "user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\noriginally_unfinished = not user_engagement.finished\nserializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, data=request.data, partial=True, context={'request': request})\ntry:\n serializer.is_valid(raise_exception=True)\nexcept ValidationError as e:\n logger.error('Error validating Workflow Collection Engagement', exc_info=e, extra=generate_extra(request=request, workflow_collection_engagement=user_engagement, serializer_errors=serializer.errors))\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\nelse:\n instance: WorkflowCollectionEngagement = serializer.save()\n if instance.finished and originally_unfinished:\n logger.info(\"User '%s' completed workflow collection '%s' version '%d'\", request.user.username, instance.workflow_collection.code, instance.workflow_collection.version, extra=generate_extra(event_code='WORKFLOW_COLLECTION_ENGAGEMENT_COMPLETED', request=request, workflow_collection_engagement=instance))\n return Response(serializer.data)"], "bodies_text": "<|body_start_0|>\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, context={'request': request})\n data = serializer.data\n data['state'] = serializer.data['state']\n if data['workflowcollectionengagementdetail_set'] and data['workflowcollectionengagementdetail_set'][-1]['user_responses'] and ('inputs' in data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1].keys()):\n user_inputs = data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1]['inputs']\n print('Something to evaluate')\n checker = [entry['is_valid'] for entry in user_inputs]\n data['state']['proceed'] = False if False in checker else True\n else:\n data['state']['proceed'] = True\n return Response(data=data)\n<|end_body_0|>\n\n<|body_start_1|>\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n originally_unfinished = not user_engagement.finished\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, data=request.data, partial=True, context={'request': request})\n try:\n serializer.is_valid(raise_exception=True)\n except ValidationError as e:\n logger.error('Error validating Workflow Collection Engagement', exc_info=e, extra=generate_extra(request=request, workflow_collection_engagement=user_engagement, serializer_errors=serializer.errors))\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n instance: WorkflowCollectionEngagement = serializer.save()\n if instance.finished and originally_unfinished:\n logger.info(\"User '%s' completed workflow collection '%s' version '%d'\", request.user.username, instance.workflow_collection.code, instance.workflow_collection.version, extra=generate_extra(event_code='WORKFLOW_COLLECTION_ENGAGEMENT_COMPLETED', request=request, workflow_collection_engagement=instance))\n return Response(serializer.data)\n<|end_body_1|>\n", "class_docstring": "**Supported HTTP Methods** * Get: Retrieve a detailed representation of a specific Workflow Engagement owned by the requesting user. * Patch: Modifying an existing Workflow Engagement resource owned by the requesting user.", "class_name": "WorkflowCollectionEngagementView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WorkflowCollectionEngagementView:\n \"\"\"**Supported HTTP Methods** * Get: Retrieve a detailed representation of a specific Workflow Engagement owned by the requesting user. * Patch: Modifying an existing Workflow Engagement resource owned by the requesting user.\"\"\"\n\n def get(self, request, id):\n \"\"\"GET a WorkflowCollectionEngagement representation for current user. Path Parameters: id (str): The UUID of the WorkflowCollectionEngagement to retrieve. Returns: A HTTP response containing a dict-like JSON representation of the engagement target with a 200 status code. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"workflow_collection\": \"http://127.0.0.1:8000/workflow_system/collections/c7b1940f-f19d-49ab-9ed5-7161dd185087/\", \"started\": \"2021-04-09T19:36:52Z\", \"finished\": null, \"workflowcollectionengagementdetail_set\": [ { \"detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264d\"\"\"\n <|body_0|>\n\n def patch(self, request, id):\n \"\"\"PATCH Workflow User Engagement details update for current user. Path Parameters: id (str): The UUID of the workflow user engagement target to modify. Body Parameters: workflow_collection (foreign key): The Workflow Collection object associated with the engagement. user (foreign key): The User object who is engaging the Workflow. started (datetime): The start date for the engagement. finished (datetime): The finish date for the engagement. Returns: A HTTP response object that depending on the result of the operation will have varying status codes and payloads. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"work\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, context={'request': request})\n data = serializer.data\n data['state'] = serializer.data['state']\n if data['workflowcollectionengagementdetail_set'] and data['workflowcollectionengagementdetail_set'][-1]['user_responses'] and ('inputs' in data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1].keys()):\n user_inputs = data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1]['inputs']\n print('Something to evaluate')\n checker = [entry['is_valid'] for entry in user_inputs]\n data['state']['proceed'] = False if False in checker else True\n else:\n data['state']['proceed'] = True\n return Response(data=data)\n<|end_body_0|>\n\n<|body_start_1|>\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n originally_unfinished = not user_engagement.finished\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, data=request.data, partial=True, context={'request': request})\n try:\n serializer.is_valid(raise_exception=True)\n except ValidationError as e:\n logger.error('Error validating Workflow Collection Engagement', exc_info=e, extra=generate_extra(request=request, workflow_collection_engagement=user_engagement, serializer_errors=serializer.errors))\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n instance: WorkflowCollectionEngagement = serializer.save()\n if instance.finished and originally_unfinished:\n logger.info(\"User '%s' completed workflow collection '%s' version '%d'\", request.user.username, instance.workflow_collection.code, instance.workflow_collection.version, extra=generate_extra(event_code='WORKFLOW_COLLECTION_ENGAGEMENT_COMPLETED', request=request, workflow_collection_engagement=instance))\n return Response(serializer.data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000129", "length_bytes": 18129, "license_type": "permissive", "methods": [{"docstring": "GET a WorkflowCollectionEngagement representation for current user. Path Parameters: id (str): The UUID of the WorkflowCollectionEngagement to retrieve. Returns: A HTTP response containing a dict-like JSON representation of the engagement target with a 200 status code. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"workflow_collection\": \"http://127.0.0.1:8000/workflow_system/collections/c7b1940f-f19d-49ab-9ed5-7161dd185087/\", \"started\": \"2021-04-09T19:36:52Z\", \"finished\": null, \"workflowcollectionengagementdetail_set\": [ { \"detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264d", "name": "get", "signature": "def get(self, request, id)"}, {"docstring": "PATCH Workflow User Engagement details update for current user. Path Parameters: id (str): The UUID of the workflow user engagement target to modify. Body Parameters: workflow_collection (foreign key): The Workflow Collection object associated with the engagement. user (foreign key): The User object who is engaging the Workflow. started (datetime): The start date for the engagement. finished (datetime): The finish date for the engagement. Returns: A HTTP response object that depending on the result of the operation will have varying status codes and payloads. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"work", "name": "patch", "signature": "def patch(self, request, id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046357", "prompt": "Implement the Python class `WorkflowCollectionEngagementView` described below.\n\nClass description:\n**Supported HTTP Methods** * Get: Retrieve a detailed representation of a specific Workflow Engagement owned by the requesting user. * Patch: Modifying an existing Workflow Engagement resource owned by the requesting user.\n\nMethod signatures and docstrings:\n- def get(self, request, id): GET a WorkflowCollectionEngagement representation for current user. Path Parameters: id (str): The UUID of the WorkflowCollectionEngagement to retrieve. Returns: A HTTP response containing a dict-like JSON representation of the engagement target with a 200 status code. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"workflow_collection\": \"http://127.0.0.1:8000/workflow_system/collections/c7b1940f-f19d-49ab-9ed5-7161dd185087/\", \"started\": \"2021-04-09T19:36:52Z\", \"finished\": null, \"workflowcollectionengagementdetail_set\": [ { \"detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264d\n- def patch(self, request, id): PATCH Workflow User Engagement details update for current user. Path Parameters: id (str): The UUID of the workflow user engagement target to modify. Body Parameters: workflow_collection (foreign key): The Workflow Collection object associated with the engagement. user (foreign key): The User object who is engaging the Workflow. started (datetime): The start date for the engagement. finished (datetime): The finish date for the engagement. Returns: A HTTP response object that depending on the result of the operation will have varying status codes and payloads. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"work", "prompted_full_text": "Implement the Python class `WorkflowCollectionEngagementView` described below.\n\nClass description:\n**Supported HTTP Methods** * Get: Retrieve a detailed representation of a specific Workflow Engagement owned by the requesting user. * Patch: Modifying an existing Workflow Engagement resource owned by the requesting user.\n\nMethod signatures and docstrings:\n- def get(self, request, id): GET a WorkflowCollectionEngagement representation for current user. Path Parameters: id (str): The UUID of the WorkflowCollectionEngagement to retrieve. Returns: A HTTP response containing a dict-like JSON representation of the engagement target with a 200 status code. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"workflow_collection\": \"http://127.0.0.1:8000/workflow_system/collections/c7b1940f-f19d-49ab-9ed5-7161dd185087/\", \"started\": \"2021-04-09T19:36:52Z\", \"finished\": null, \"workflowcollectionengagementdetail_set\": [ { \"detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264d\n- def patch(self, request, id): PATCH Workflow User Engagement details update for current user. Path Parameters: id (str): The UUID of the workflow user engagement target to modify. Body Parameters: workflow_collection (foreign key): The Workflow Collection object associated with the engagement. user (foreign key): The User object who is engaging the Workflow. started (datetime): The start date for the engagement. finished (datetime): The finish date for the engagement. Returns: A HTTP response object that depending on the result of the operation will have varying status codes and payloads. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"work\n\n<|skeleton|>\nclass WorkflowCollectionEngagementView:\n \"\"\"**Supported HTTP Methods** * Get: Retrieve a detailed representation of a specific Workflow Engagement owned by the requesting user. * Patch: Modifying an existing Workflow Engagement resource owned by the requesting user.\"\"\"\n\n def get(self, request, id):\n \"\"\"GET a WorkflowCollectionEngagement representation for current user. Path Parameters: id (str): The UUID of the WorkflowCollectionEngagement to retrieve. Returns: A HTTP response containing a dict-like JSON representation of the engagement target with a 200 status code. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"workflow_collection\": \"http://127.0.0.1:8000/workflow_system/collections/c7b1940f-f19d-49ab-9ed5-7161dd185087/\", \"started\": \"2021-04-09T19:36:52Z\", \"finished\": null, \"workflowcollectionengagementdetail_set\": [ { \"detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264d\"\"\"\n <|body_0|>\n\n def patch(self, request, id):\n \"\"\"PATCH Workflow User Engagement details update for current user. Path Parameters: id (str): The UUID of the workflow user engagement target to modify. Body Parameters: workflow_collection (foreign key): The Workflow Collection object associated with the engagement. user (foreign key): The User object who is engaging the Workflow. started (datetime): The start date for the engagement. finished (datetime): The finish date for the engagement. Returns: A HTTP response object that depending on the result of the operation will have varying status codes and payloads. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"work\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, context={'request': request})\n data = serializer.data\n data['state'] = serializer.data['state']\n if data['workflowcollectionengagementdetail_set'] and data['workflowcollectionengagementdetail_set'][-1]['user_responses'] and ('inputs' in data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1].keys()):\n user_inputs = data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1]['inputs']\n print('Something to evaluate')\n checker = [entry['is_valid'] for entry in user_inputs]\n data['state']['proceed'] = False if False in checker else True\n else:\n data['state']['proceed'] = True\n return Response(data=data)\n<|end_body_0|>\n\n<|body_start_1|>\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n originally_unfinished = not user_engagement.finished\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, data=request.data, partial=True, context={'request': request})\n try:\n serializer.is_valid(raise_exception=True)\n except ValidationError as e:\n logger.error('Error validating Workflow Collection Engagement', exc_info=e, extra=generate_extra(request=request, workflow_collection_engagement=user_engagement, serializer_errors=serializer.errors))\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n instance: WorkflowCollectionEngagement = serializer.save()\n if instance.finished and originally_unfinished:\n logger.info(\"User '%s' completed workflow collection '%s' version '%d'\", request.user.username, instance.workflow_collection.code, instance.workflow_collection.version, extra=generate_extra(event_code='WORKFLOW_COLLECTION_ENGAGEMENT_COMPLETED', request=request, workflow_collection_engagement=instance))\n return Response(serializer.data)\n<|end_body_1|>\n", "revision_id": "dc0e8807263266713d3d7fa46e240e8d72db28d1", "skeleton": "<|skeleton|>\nclass WorkflowCollectionEngagementView:\n \"\"\"**Supported HTTP Methods** * Get: Retrieve a detailed representation of a specific Workflow Engagement owned by the requesting user. * Patch: Modifying an existing Workflow Engagement resource owned by the requesting user.\"\"\"\n\n def get(self, request, id):\n \"\"\"GET a WorkflowCollectionEngagement representation for current user. Path Parameters: id (str): The UUID of the WorkflowCollectionEngagement to retrieve. Returns: A HTTP response containing a dict-like JSON representation of the engagement target with a 200 status code. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"workflow_collection\": \"http://127.0.0.1:8000/workflow_system/collections/c7b1940f-f19d-49ab-9ed5-7161dd185087/\", \"started\": \"2021-04-09T19:36:52Z\", \"finished\": null, \"workflowcollectionengagementdetail_set\": [ { \"detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264d\"\"\"\n <|body_0|>\n\n def patch(self, request, id):\n \"\"\"PATCH Workflow User Engagement details update for current user. Path Parameters: id (str): The UUID of the workflow user engagement target to modify. Body Parameters: workflow_collection (foreign key): The Workflow Collection object associated with the engagement. user (foreign key): The User object who is engaging the Workflow. started (datetime): The start date for the engagement. finished (datetime): The finish date for the engagement. Returns: A HTTP response object that depending on the result of the operation will have varying status codes and payloads. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"work\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WorkflowCollectionEngagementView:\n \"\"\"**Supported HTTP Methods** * Get: Retrieve a detailed representation of a specific Workflow Engagement owned by the requesting user. * Patch: Modifying an existing Workflow Engagement resource owned by the requesting user.\"\"\"\n\n def get(self, request, id):\n \"\"\"GET a WorkflowCollectionEngagement representation for current user. Path Parameters: id (str): The UUID of the WorkflowCollectionEngagement to retrieve. Returns: A HTTP response containing a dict-like JSON representation of the engagement target with a 200 status code. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"workflow_collection\": \"http://127.0.0.1:8000/workflow_system/collections/c7b1940f-f19d-49ab-9ed5-7161dd185087/\", \"started\": \"2021-04-09T19:36:52Z\", \"finished\": null, \"workflowcollectionengagementdetail_set\": [ { \"detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264d\"\"\"\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, context={'request': request})\n data = serializer.data\n data['state'] = serializer.data['state']\n if data['workflowcollectionengagementdetail_set'] and data['workflowcollectionengagementdetail_set'][-1]['user_responses'] and ('inputs' in data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1].keys()):\n user_inputs = data['workflowcollectionengagementdetail_set'][-1]['user_responses'][-1]['inputs']\n print('Something to evaluate')\n checker = [entry['is_valid'] for entry in user_inputs]\n data['state']['proceed'] = False if False in checker else True\n else:\n data['state']['proceed'] = True\n return Response(data=data)\n\n def patch(self, request, id):\n \"\"\"PATCH Workflow User Engagement details update for current user. Path Parameters: id (str): The UUID of the workflow user engagement target to modify. Body Parameters: workflow_collection (foreign key): The Workflow Collection object associated with the engagement. user (foreign key): The User object who is engaging the Workflow. started (datetime): The start date for the engagement. finished (datetime): The finish date for the engagement. Returns: A HTTP response object that depending on the result of the operation will have varying status codes and payloads. { \"self_detail\": \"http://127.0.0.1:8000/workflow_system/users/self/workflows/engagements/9b264dd6-0e53-4c39-9473-2d0888405532/\", \"work\"\"\"\n user_engagement = get_object_or_404(WorkflowCollectionEngagement, id=id, user=request.user.id)\n originally_unfinished = not user_engagement.finished\n serializer = WorkflowCollectionEngagementDetailedSerializer(user_engagement, data=request.data, partial=True, context={'request': request})\n try:\n serializer.is_valid(raise_exception=True)\n except ValidationError as e:\n logger.error('Error validating Workflow Collection Engagement', exc_info=e, extra=generate_extra(request=request, workflow_collection_engagement=user_engagement, serializer_errors=serializer.errors))\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n instance: WorkflowCollectionEngagement = serializer.save()\n if instance.finished and originally_unfinished:\n logger.info(\"User '%s' completed workflow collection '%s' version '%d'\", request.user.username, instance.workflow_collection.code, instance.workflow_collection.version, extra=generate_extra(event_code='WORKFLOW_COLLECTION_ENGAGEMENT_COMPLETED', request=request, workflow_collection_engagement=instance))\n return Response(serializer.data)\n", "source": "the_stack_v2_python_sparse", "source_path": "django_workflow_system/api/views/user/workflows/engagement.py", "source_repo": "kwang1971/django-workflow-system", "split": "test", "star_events_count": 0} {"blob_id": "a85090cd2866ef3fa6587a6fa7a38b0074dd534e", "bodies": ["self.data_to_sign = data_to_sign\nself.data_format = data_format\nself.external_reference = external_reference\nself.xslt = xslt\nself.signing_format = signing_format\nself.additional_properties = additional_properties", "if dictionary is None:\n return None\ndata_to_sign = dictionary.get('dataToSign')\ndata_format = dictionary.get('dataFormat')\nexternal_reference = dictionary.get('externalReference')\nxslt = dictionary.get('xslt')\nsigning_format = dictionary.get('signingFormat')\nfor key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\nreturn cls(data_to_sign, data_format, external_reference, xslt, signing_format, dictionary)"], "bodies_text": "<|body_start_0|>\n self.data_to_sign = data_to_sign\n self.data_format = data_format\n self.external_reference = external_reference\n self.xslt = xslt\n self.signing_format = signing_format\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n data_to_sign = dictionary.get('dataToSign')\n data_format = dictionary.get('dataFormat')\n external_reference = dictionary.get('externalReference')\n xslt = dictionary.get('xslt')\n signing_format = dictionary.get('signingFormat')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(data_to_sign, data_format, external_reference, xslt, signing_format, dictionary)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'SignRequest' model. TODO: type model description here. Attributes: data_to_sign (string): Base 64 encoded data data_format (DataFormat): Format of data (i.e xml) external_reference (string): The service reference for the signing. Will be used for auditlog, and invoicing xslt (string): Base 64 encoded xslt (optional) signing_format (SigningFormat): Optional, if not set the default setting for the account will be used", "class_name": "SignRequest", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SignRequest:\n \"\"\"Implementation of the 'SignRequest' model. TODO: type model description here. Attributes: data_to_sign (string): Base 64 encoded data data_format (DataFormat): Format of data (i.e xml) external_reference (string): The service reference for the signing. Will be used for auditlog, and invoicing xslt (string): Base 64 encoded xslt (optional) signing_format (SigningFormat): Optional, if not set the default setting for the account will be used\"\"\"\n\n def __init__(self, data_to_sign=None, data_format=None, external_reference=None, xslt=None, signing_format=None, additional_properties={}):\n \"\"\"Constructor for the SignRequest class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data_to_sign = data_to_sign\n self.data_format = data_format\n self.external_reference = external_reference\n self.xslt = xslt\n self.signing_format = signing_format\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n data_to_sign = dictionary.get('dataToSign')\n data_format = dictionary.get('dataFormat')\n external_reference = dictionary.get('externalReference')\n xslt = dictionary.get('xslt')\n signing_format = dictionary.get('signingFormat')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(data_to_sign, data_format, external_reference, xslt, signing_format, dictionary)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000130", "length_bytes": 3017, "license_type": "permissive", "methods": [{"docstring": "Constructor for the SignRequest class", "name": "__init__", "signature": "def __init__(self, data_to_sign=None, data_format=None, external_reference=None, xslt=None, signing_format=None, additional_properties={})"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013473", "prompt": "Implement the Python class `SignRequest` described below.\n\nClass description:\nImplementation of the 'SignRequest' model. TODO: type model description here. Attributes: data_to_sign (string): Base 64 encoded data data_format (DataFormat): Format of data (i.e xml) external_reference (string): The service reference for the signing. Will be used for auditlog, and invoicing xslt (string): Base 64 encoded xslt (optional) signing_format (SigningFormat): Optional, if not set the default setting for the account will be used\n\nMethod signatures and docstrings:\n- def __init__(self, data_to_sign=None, data_format=None, external_reference=None, xslt=None, signing_format=None, additional_properties={}): Constructor for the SignRequest class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `SignRequest` described below.\n\nClass description:\nImplementation of the 'SignRequest' model. TODO: type model description here. Attributes: data_to_sign (string): Base 64 encoded data data_format (DataFormat): Format of data (i.e xml) external_reference (string): The service reference for the signing. Will be used for auditlog, and invoicing xslt (string): Base 64 encoded xslt (optional) signing_format (SigningFormat): Optional, if not set the default setting for the account will be used\n\nMethod signatures and docstrings:\n- def __init__(self, data_to_sign=None, data_format=None, external_reference=None, xslt=None, signing_format=None, additional_properties={}): Constructor for the SignRequest class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass SignRequest:\n \"\"\"Implementation of the 'SignRequest' model. TODO: type model description here. Attributes: data_to_sign (string): Base 64 encoded data data_format (DataFormat): Format of data (i.e xml) external_reference (string): The service reference for the signing. Will be used for auditlog, and invoicing xslt (string): Base 64 encoded xslt (optional) signing_format (SigningFormat): Optional, if not set the default setting for the account will be used\"\"\"\n\n def __init__(self, data_to_sign=None, data_format=None, external_reference=None, xslt=None, signing_format=None, additional_properties={}):\n \"\"\"Constructor for the SignRequest class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data_to_sign = data_to_sign\n self.data_format = data_format\n self.external_reference = external_reference\n self.xslt = xslt\n self.signing_format = signing_format\n self.additional_properties = additional_properties\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n data_to_sign = dictionary.get('dataToSign')\n data_format = dictionary.get('dataFormat')\n external_reference = dictionary.get('externalReference')\n xslt = dictionary.get('xslt')\n signing_format = dictionary.get('signingFormat')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(data_to_sign, data_format, external_reference, xslt, signing_format, dictionary)\n<|end_body_1|>\n", "revision_id": "fa3918a6c54ea0eedb9146578645b7eb1755b642", "skeleton": "<|skeleton|>\nclass SignRequest:\n \"\"\"Implementation of the 'SignRequest' model. TODO: type model description here. Attributes: data_to_sign (string): Base 64 encoded data data_format (DataFormat): Format of data (i.e xml) external_reference (string): The service reference for the signing. Will be used for auditlog, and invoicing xslt (string): Base 64 encoded xslt (optional) signing_format (SigningFormat): Optional, if not set the default setting for the account will be used\"\"\"\n\n def __init__(self, data_to_sign=None, data_format=None, external_reference=None, xslt=None, signing_format=None, additional_properties={}):\n \"\"\"Constructor for the SignRequest class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SignRequest:\n \"\"\"Implementation of the 'SignRequest' model. TODO: type model description here. Attributes: data_to_sign (string): Base 64 encoded data data_format (DataFormat): Format of data (i.e xml) external_reference (string): The service reference for the signing. Will be used for auditlog, and invoicing xslt (string): Base 64 encoded xslt (optional) signing_format (SigningFormat): Optional, if not set the default setting for the account will be used\"\"\"\n\n def __init__(self, data_to_sign=None, data_format=None, external_reference=None, xslt=None, signing_format=None, additional_properties={}):\n \"\"\"Constructor for the SignRequest class\"\"\"\n self.data_to_sign = data_to_sign\n self.data_format = data_format\n self.external_reference = external_reference\n self.xslt = xslt\n self.signing_format = signing_format\n self.additional_properties = additional_properties\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n data_to_sign = dictionary.get('dataToSign')\n data_format = dictionary.get('dataFormat')\n external_reference = dictionary.get('externalReference')\n xslt = dictionary.get('xslt')\n signing_format = dictionary.get('signingFormat')\n for key in cls._names.values():\n if key in dictionary:\n del dictionary[key]\n return cls(data_to_sign, data_format, external_reference, xslt, signing_format, dictionary)\n", "source": "the_stack_v2_python_sparse", "source_path": "idfy_rest_client/models/sign_request.py", "source_repo": "dealflowteam/Idfy", "split": "test", "star_events_count": 0} {"blob_id": "58d8c2271fb423f8309143ccf3d44b10f145e02b", "bodies": ["print(data)\nmin_date = timezone.now() + timedelta(minutes=10)\nif data <= min_date:\n raise serializers.ValidationError('Departure time must be at least pass the next 20 minutes window')\nreturn data", "if self.context['request'].user != data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\nuser = data['offered_by']\ncircle = self.context['circle']\ntry:\n membership = MemberShip.objects.get(user=user, circle=circle, is_active=True)\nexcept MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\nif data['arrival_date'] <= data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\nself.context['membership'] = membership\nreturn data", "circle = self.context['circle']\nride = Rides.objects.create(**data, offered_in=circle)\ncircle.rides_offered += 1\ncircle.save()\nmembership = self.context['membership']\nmembership.rides_offered += 1\nmembership.save()\nprofile = data['offered_by'].profile\nprofile.rides_offered += 1\nprofile.save()\nreturn ride"], "bodies_text": "<|body_start_0|>\n print(data)\n min_date = timezone.now() + timedelta(minutes=10)\n if data <= min_date:\n raise serializers.ValidationError('Departure time must be at least pass the next 20 minutes window')\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n if self.context['request'].user != data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\n user = data['offered_by']\n circle = self.context['circle']\n try:\n membership = MemberShip.objects.get(user=user, circle=circle, is_active=True)\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\n if data['arrival_date'] <= data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\n self.context['membership'] = membership\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n circle = self.context['circle']\n ride = Rides.objects.create(**data, offered_in=circle)\n circle.rides_offered += 1\n circle.save()\n membership = self.context['membership']\n membership.rides_offered += 1\n membership.save()\n profile = data['offered_by'].profile\n profile.rides_offered += 1\n profile.save()\n return ride\n<|end_body_2|>\n", "class_docstring": "Create ride serializer", "class_name": "CreateRideSerialier", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CreateRideSerialier:\n \"\"\"Create ride serializer\"\"\"\n\n def validate_departure_date(self, data):\n \"\"\"Verify date is not in the past\"\"\"\n <|body_0|>\n\n def validate(self, data):\n \"\"\"Validate Verify that the person who offers the ride is member and also the same user making the request\"\"\"\n <|body_1|>\n\n def create(self, data):\n \"\"\"Create ride and update stats\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n print(data)\n min_date = timezone.now() + timedelta(minutes=10)\n if data <= min_date:\n raise serializers.ValidationError('Departure time must be at least pass the next 20 minutes window')\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n if self.context['request'].user != data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\n user = data['offered_by']\n circle = self.context['circle']\n try:\n membership = MemberShip.objects.get(user=user, circle=circle, is_active=True)\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\n if data['arrival_date'] <= data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\n self.context['membership'] = membership\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n circle = self.context['circle']\n ride = Rides.objects.create(**data, offered_in=circle)\n circle.rides_offered += 1\n circle.save()\n membership = self.context['membership']\n membership.rides_offered += 1\n membership.save()\n profile = data['offered_by'].profile\n profile.rides_offered += 1\n profile.save()\n return ride\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000131", "length_bytes": 7953, "license_type": "no_license", "methods": [{"docstring": "Verify date is not in the past", "name": "validate_departure_date", "signature": "def validate_departure_date(self, data)"}, {"docstring": "Validate Verify that the person who offers the ride is member and also the same user making the request", "name": "validate", "signature": "def validate(self, data)"}, {"docstring": "Create ride and update stats", "name": "create", "signature": "def create(self, data)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_032706", "prompt": "Implement the Python class `CreateRideSerialier` described below.\n\nClass description:\nCreate ride serializer\n\nMethod signatures and docstrings:\n- def validate_departure_date(self, data): Verify date is not in the past\n- def validate(self, data): Validate Verify that the person who offers the ride is member and also the same user making the request\n- def create(self, data): Create ride and update stats", "prompted_full_text": "Implement the Python class `CreateRideSerialier` described below.\n\nClass description:\nCreate ride serializer\n\nMethod signatures and docstrings:\n- def validate_departure_date(self, data): Verify date is not in the past\n- def validate(self, data): Validate Verify that the person who offers the ride is member and also the same user making the request\n- def create(self, data): Create ride and update stats\n\n<|skeleton|>\nclass CreateRideSerialier:\n \"\"\"Create ride serializer\"\"\"\n\n def validate_departure_date(self, data):\n \"\"\"Verify date is not in the past\"\"\"\n <|body_0|>\n\n def validate(self, data):\n \"\"\"Validate Verify that the person who offers the ride is member and also the same user making the request\"\"\"\n <|body_1|>\n\n def create(self, data):\n \"\"\"Create ride and update stats\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n print(data)\n min_date = timezone.now() + timedelta(minutes=10)\n if data <= min_date:\n raise serializers.ValidationError('Departure time must be at least pass the next 20 minutes window')\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n if self.context['request'].user != data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\n user = data['offered_by']\n circle = self.context['circle']\n try:\n membership = MemberShip.objects.get(user=user, circle=circle, is_active=True)\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\n if data['arrival_date'] <= data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\n self.context['membership'] = membership\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n circle = self.context['circle']\n ride = Rides.objects.create(**data, offered_in=circle)\n circle.rides_offered += 1\n circle.save()\n membership = self.context['membership']\n membership.rides_offered += 1\n membership.save()\n profile = data['offered_by'].profile\n profile.rides_offered += 1\n profile.save()\n return ride\n<|end_body_2|>\n", "revision_id": "0cede53169041667bd40bbce3c4774af84ffc2fa", "skeleton": "<|skeleton|>\nclass CreateRideSerialier:\n \"\"\"Create ride serializer\"\"\"\n\n def validate_departure_date(self, data):\n \"\"\"Verify date is not in the past\"\"\"\n <|body_0|>\n\n def validate(self, data):\n \"\"\"Validate Verify that the person who offers the ride is member and also the same user making the request\"\"\"\n <|body_1|>\n\n def create(self, data):\n \"\"\"Create ride and update stats\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CreateRideSerialier:\n \"\"\"Create ride serializer\"\"\"\n\n def validate_departure_date(self, data):\n \"\"\"Verify date is not in the past\"\"\"\n print(data)\n min_date = timezone.now() + timedelta(minutes=10)\n if data <= min_date:\n raise serializers.ValidationError('Departure time must be at least pass the next 20 minutes window')\n return data\n\n def validate(self, data):\n \"\"\"Validate Verify that the person who offers the ride is member and also the same user making the request\"\"\"\n if self.context['request'].user != data['offered_by']:\n raise serializer.ValidationError('Ride offered on behalf of others are not allowed')\n user = data['offered_by']\n circle = self.context['circle']\n try:\n membership = MemberShip.objects.get(user=user, circle=circle, is_active=True)\n except MemberShip.DoesNotExist:\n raise serializers.ValidationError('user is not an active member of circle')\n if data['arrival_date'] <= data['departure_date']:\n raise serializers.ValidationError('Departure date must happen offer arrival date')\n self.context['membership'] = membership\n return data\n\n def create(self, data):\n \"\"\"Create ride and update stats\"\"\"\n circle = self.context['circle']\n ride = Rides.objects.create(**data, offered_in=circle)\n circle.rides_offered += 1\n circle.save()\n membership = self.context['membership']\n membership.rides_offered += 1\n membership.save()\n profile = data['offered_by'].profile\n profile.rides_offered += 1\n profile.save()\n return ride\n", "source": "the_stack_v2_python_sparse", "source_path": "rides/serializers/rides.py", "source_repo": "KrystellCR/DjangoRF", "split": "test", "star_events_count": 0} {"blob_id": "226199e0c2f92d9980903a2716d2a5be309b83d7", "bodies": ["try:\n return ip_address(ip)\nexcept ValueError:\n try:\n return ip_network(ip, strict=False)\n except ValueError:\n raise IPToolsExceptions.NotValidIP(\"'{}' is not a valid IP network or address\".format(ip)) from None", "for i in ip_list:\n if isinstance(i, IPv4Address or isinstance(i, IPv6Address)):\n if u_ip == i:\n return True\n elif isinstance(i, IPv4Network) or isinstance(i, IPv6Network):\n if u_ip in i:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n try:\n return ip_address(ip)\n except ValueError:\n try:\n return ip_network(ip, strict=False)\n except ValueError:\n raise IPToolsExceptions.NotValidIP(\"'{}' is not a valid IP network or address\".format(ip)) from None\n<|end_body_0|>\n\n<|body_start_1|>\n for i in ip_list:\n if isinstance(i, IPv4Address or isinstance(i, IPv6Address)):\n if u_ip == i:\n return True\n elif isinstance(i, IPv4Network) or isinstance(i, IPv6Network):\n if u_ip in i:\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "IPCheck", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IPCheck:\n\n def checkIfIP(ip: str):\n \"\"\"Checks if the provided string is a valid IPv4 or IPv6 address This function either returns an ipaddress object or None :param ip: IP address in string format :type ip: str :return: Returns an ipaddress object :rtype: ipaddress\"\"\"\n <|body_0|>\n\n def ipInList(u_ip, ip_list):\n \"\"\"Check if an unknown IP address (not network), (u_ip) is in a provided list (ip_list) :param u_ip: IP address :param ip_list: List of IP addresses or networks :return: True if u_ip in ip_list; False if u_ip NOT in ip_list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return ip_address(ip)\n except ValueError:\n try:\n return ip_network(ip, strict=False)\n except ValueError:\n raise IPToolsExceptions.NotValidIP(\"'{}' is not a valid IP network or address\".format(ip)) from None\n<|end_body_0|>\n\n<|body_start_1|>\n for i in ip_list:\n if isinstance(i, IPv4Address or isinstance(i, IPv6Address)):\n if u_ip == i:\n return True\n elif isinstance(i, IPv4Network) or isinstance(i, IPv6Network):\n if u_ip in i:\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000132", "length_bytes": 5784, "license_type": "no_license", "methods": [{"docstring": "Checks if the provided string is a valid IPv4 or IPv6 address This function either returns an ipaddress object or None :param ip: IP address in string format :type ip: str :return: Returns an ipaddress object :rtype: ipaddress", "name": "checkIfIP", "signature": "def checkIfIP(ip: str)"}, {"docstring": "Check if an unknown IP address (not network), (u_ip) is in a provided list (ip_list) :param u_ip: IP address :param ip_list: List of IP addresses or networks :return: True if u_ip in ip_list; False if u_ip NOT in ip_list", "name": "ipInList", "signature": "def ipInList(u_ip, ip_list)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027535", "prompt": "Implement the Python class `IPCheck` described below.\n\nClass description:\nImplement the IPCheck class.\n\nMethod signatures and docstrings:\n- def checkIfIP(ip: str): Checks if the provided string is a valid IPv4 or IPv6 address This function either returns an ipaddress object or None :param ip: IP address in string format :type ip: str :return: Returns an ipaddress object :rtype: ipaddress\n- def ipInList(u_ip, ip_list): Check if an unknown IP address (not network), (u_ip) is in a provided list (ip_list) :param u_ip: IP address :param ip_list: List of IP addresses or networks :return: True if u_ip in ip_list; False if u_ip NOT in ip_list", "prompted_full_text": "Implement the Python class `IPCheck` described below.\n\nClass description:\nImplement the IPCheck class.\n\nMethod signatures and docstrings:\n- def checkIfIP(ip: str): Checks if the provided string is a valid IPv4 or IPv6 address This function either returns an ipaddress object or None :param ip: IP address in string format :type ip: str :return: Returns an ipaddress object :rtype: ipaddress\n- def ipInList(u_ip, ip_list): Check if an unknown IP address (not network), (u_ip) is in a provided list (ip_list) :param u_ip: IP address :param ip_list: List of IP addresses or networks :return: True if u_ip in ip_list; False if u_ip NOT in ip_list\n\n<|skeleton|>\nclass IPCheck:\n\n def checkIfIP(ip: str):\n \"\"\"Checks if the provided string is a valid IPv4 or IPv6 address This function either returns an ipaddress object or None :param ip: IP address in string format :type ip: str :return: Returns an ipaddress object :rtype: ipaddress\"\"\"\n <|body_0|>\n\n def ipInList(u_ip, ip_list):\n \"\"\"Check if an unknown IP address (not network), (u_ip) is in a provided list (ip_list) :param u_ip: IP address :param ip_list: List of IP addresses or networks :return: True if u_ip in ip_list; False if u_ip NOT in ip_list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return ip_address(ip)\n except ValueError:\n try:\n return ip_network(ip, strict=False)\n except ValueError:\n raise IPToolsExceptions.NotValidIP(\"'{}' is not a valid IP network or address\".format(ip)) from None\n<|end_body_0|>\n\n<|body_start_1|>\n for i in ip_list:\n if isinstance(i, IPv4Address or isinstance(i, IPv6Address)):\n if u_ip == i:\n return True\n elif isinstance(i, IPv4Network) or isinstance(i, IPv6Network):\n if u_ip in i:\n return True\n return False\n<|end_body_1|>\n", "revision_id": "c63c8515a05b15e55ebc0911edf5cf26e213fe1d", "skeleton": "<|skeleton|>\nclass IPCheck:\n\n def checkIfIP(ip: str):\n \"\"\"Checks if the provided string is a valid IPv4 or IPv6 address This function either returns an ipaddress object or None :param ip: IP address in string format :type ip: str :return: Returns an ipaddress object :rtype: ipaddress\"\"\"\n <|body_0|>\n\n def ipInList(u_ip, ip_list):\n \"\"\"Check if an unknown IP address (not network), (u_ip) is in a provided list (ip_list) :param u_ip: IP address :param ip_list: List of IP addresses or networks :return: True if u_ip in ip_list; False if u_ip NOT in ip_list\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IPCheck:\n def checkIfIP(ip: str):\n \"\"\"Checks if the provided string is a valid IPv4 or IPv6 address This function either returns an ipaddress object or None :param ip: IP address in string format :type ip: str :return: Returns an ipaddress object :rtype: ipaddress\"\"\"\n try:\n return ip_address(ip)\n except ValueError:\n try:\n return ip_network(ip, strict=False)\n except ValueError:\n raise IPToolsExceptions.NotValidIP(\"'{}' is not a valid IP network or address\".format(ip)) from None\n\n def ipInList(u_ip, ip_list):\n \"\"\"Check if an unknown IP address (not network), (u_ip) is in a provided list (ip_list) :param u_ip: IP address :param ip_list: List of IP addresses or networks :return: True if u_ip in ip_list; False if u_ip NOT in ip_list\"\"\"\n for i in ip_list:\n if isinstance(i, IPv4Address or isinstance(i, IPv6Address)):\n if u_ip == i:\n return True\n elif isinstance(i, IPv4Network) or isinstance(i, IPv6Network):\n if u_ip in i:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/lib/iptools.py", "source_repo": "radioboyQ/work_utils", "split": "test", "star_events_count": 0} {"blob_id": "aaa9dcdb4bce2e61a58e426639a168bd91edddef", "bodies": ["if not root:\n return ''\nqueue = deque()\nqueue.append(root)\nres = ''\nwhile len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\nreturn res", "if data == '':\n return None\nqueue = deque()\nvalues = data.split()\nroot = TreeNode(int(values[0]))\nqueue.append(root)\ni = 1\nwhile i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000133", "length_bytes": 1455, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046898", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n<|end_body_1|>\n", "revision_id": "90b6287b742c8bfd3797540c408d679be2821a40", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n if not root:\n return ''\n queue = deque()\n queue.append(root)\n res = ''\n while len(queue):\n node = queue.popleft()\n if not node:\n res += 'n '\n continue\n res += str(node.val) + ' '\n queue.append(node.left)\n queue.append(node.right)\n return res\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n if data == '':\n return None\n queue = deque()\n values = data.split()\n root = TreeNode(int(values[0]))\n queue.append(root)\n i = 1\n while i < len(values):\n parent = queue.popleft()\n if values[i] != 'n':\n left = TreeNode(int(values[i]))\n parent.left = left\n queue.append(left)\n i += 1\n if values[i] != 'n':\n right = TreeNode(int(values[i]))\n parent.right = right\n queue.append(right)\n i += 1\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCodeSolutions/python/297_Serialize_and_Deserialize_Binary_Tree.py", "source_repo": "ChuanleiGuo/AlgorithmsPlayground", "split": "test", "star_events_count": 1} {"blob_id": "9c879ac91c2692085274e3567788772c03231a63", "bodies": ["try:\n return Response(self.nyuki.running_workflows[iid].report(data=False))\nexcept KeyError:\n return Response(status=404)", "try:\n wf = self.nyuki.running_workflows[iid]\nexcept KeyError:\n return Response(status=404)\nrequest = await request.json()\ntry:\n action = request['action']\nexcept KeyError:\n return Response(status=400, body={'action parameter required'})\nif action == 'suspend':\n wf.instance.suspend()\nelif action == 'resume':\n wf.instance.resume()\nelse:\n return Response(status=400, body={\"action must be 'suspend' or 'resume'\"})\nreturn Response(wf.report())", "try:\n self.nyuki.running_workflows[iid].instance.cancel()\nexcept KeyError:\n return Response(status=404)"], "bodies_text": "<|body_start_0|>\n try:\n return Response(self.nyuki.running_workflows[iid].report(data=False))\n except KeyError:\n return Response(status=404)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n wf = self.nyuki.running_workflows[iid]\n except KeyError:\n return Response(status=404)\n request = await request.json()\n try:\n action = request['action']\n except KeyError:\n return Response(status=400, body={'action parameter required'})\n if action == 'suspend':\n wf.instance.suspend()\n elif action == 'resume':\n wf.instance.resume()\n else:\n return Response(status=400, body={\"action must be 'suspend' or 'resume'\"})\n return Response(wf.report())\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.nyuki.running_workflows[iid].instance.cancel()\n except KeyError:\n return Response(status=404)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ApiWorkflow", "detected_licenses": ["MIT", "GPL-1.0-or-later", "LicenseRef-scancode-other-copyleft", "GPL-2.0-or-later", "LicenseRef-scancode-unknown-license-reference", "LGPL-2.1-or-later", "GPL-2.0-only", "LicenseRef-scancode-proprietary-license", "LicenseRef-scancode-generic-exception", "Apache-2.0", "LicenseRef-scancode-warranty-disclaimer"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ApiWorkflow:\n\n async def get(self, request, iid):\n \"\"\"Return a workflow instance\"\"\"\n <|body_0|>\n\n async def post(self, request, iid):\n \"\"\"Suspend/resume a runnning workflow.\"\"\"\n <|body_1|>\n\n async def delete(self, request, iid):\n \"\"\"Cancel a workflow instance.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return Response(self.nyuki.running_workflows[iid].report(data=False))\n except KeyError:\n return Response(status=404)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n wf = self.nyuki.running_workflows[iid]\n except KeyError:\n return Response(status=404)\n request = await request.json()\n try:\n action = request['action']\n except KeyError:\n return Response(status=400, body={'action parameter required'})\n if action == 'suspend':\n wf.instance.suspend()\n elif action == 'resume':\n wf.instance.resume()\n else:\n return Response(status=400, body={\"action must be 'suspend' or 'resume'\"})\n return Response(wf.report())\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.nyuki.running_workflows[iid].instance.cancel()\n except KeyError:\n return Response(status=404)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000134", "length_bytes": 15185, "license_type": "permissive", "methods": [{"docstring": "Return a workflow instance", "name": "get", "signature": "async def get(self, request, iid)"}, {"docstring": "Suspend/resume a runnning workflow.", "name": "post", "signature": "async def post(self, request, iid)"}, {"docstring": "Cancel a workflow instance.", "name": "delete", "signature": "async def delete(self, request, iid)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_005229", "prompt": "Implement the Python class `ApiWorkflow` described below.\n\nClass description:\nImplement the ApiWorkflow class.\n\nMethod signatures and docstrings:\n- async def get(self, request, iid): Return a workflow instance\n- async def post(self, request, iid): Suspend/resume a runnning workflow.\n- async def delete(self, request, iid): Cancel a workflow instance.", "prompted_full_text": "Implement the Python class `ApiWorkflow` described below.\n\nClass description:\nImplement the ApiWorkflow class.\n\nMethod signatures and docstrings:\n- async def get(self, request, iid): Return a workflow instance\n- async def post(self, request, iid): Suspend/resume a runnning workflow.\n- async def delete(self, request, iid): Cancel a workflow instance.\n\n<|skeleton|>\nclass ApiWorkflow:\n\n async def get(self, request, iid):\n \"\"\"Return a workflow instance\"\"\"\n <|body_0|>\n\n async def post(self, request, iid):\n \"\"\"Suspend/resume a runnning workflow.\"\"\"\n <|body_1|>\n\n async def delete(self, request, iid):\n \"\"\"Cancel a workflow instance.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n return Response(self.nyuki.running_workflows[iid].report(data=False))\n except KeyError:\n return Response(status=404)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n wf = self.nyuki.running_workflows[iid]\n except KeyError:\n return Response(status=404)\n request = await request.json()\n try:\n action = request['action']\n except KeyError:\n return Response(status=400, body={'action parameter required'})\n if action == 'suspend':\n wf.instance.suspend()\n elif action == 'resume':\n wf.instance.resume()\n else:\n return Response(status=400, body={\"action must be 'suspend' or 'resume'\"})\n return Response(wf.report())\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n self.nyuki.running_workflows[iid].instance.cancel()\n except KeyError:\n return Response(status=404)\n<|end_body_2|>\n", "revision_id": "f185fababee380660930243515652093855acfe7", "skeleton": "<|skeleton|>\nclass ApiWorkflow:\n\n async def get(self, request, iid):\n \"\"\"Return a workflow instance\"\"\"\n <|body_0|>\n\n async def post(self, request, iid):\n \"\"\"Suspend/resume a runnning workflow.\"\"\"\n <|body_1|>\n\n async def delete(self, request, iid):\n \"\"\"Cancel a workflow instance.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ApiWorkflow:\n async def get(self, request, iid):\n \"\"\"Return a workflow instance\"\"\"\n try:\n return Response(self.nyuki.running_workflows[iid].report(data=False))\n except KeyError:\n return Response(status=404)\n\n async def post(self, request, iid):\n \"\"\"Suspend/resume a runnning workflow.\"\"\"\n try:\n wf = self.nyuki.running_workflows[iid]\n except KeyError:\n return Response(status=404)\n request = await request.json()\n try:\n action = request['action']\n except KeyError:\n return Response(status=400, body={'action parameter required'})\n if action == 'suspend':\n wf.instance.suspend()\n elif action == 'resume':\n wf.instance.resume()\n else:\n return Response(status=400, body={\"action must be 'suspend' or 'resume'\"})\n return Response(wf.report())\n\n async def delete(self, request, iid):\n \"\"\"Cancel a workflow instance.\"\"\"\n try:\n self.nyuki.running_workflows[iid].instance.cancel()\n except KeyError:\n return Response(status=404)\n", "source": "the_stack_v2_python_sparse", "source_path": "nyuki/workflow/api/instances.py", "source_repo": "d-nery/nyuki", "split": "test", "star_events_count": 0} {"blob_id": "171aff9e6483768a821a2bfa7a0eceafcb83882d", "bodies": ["if n <= 0:\n raise ValueError('n must be a positive value')\nif p < 0 or p > 1:\n raise ValueError('p must be greater than 0 and less than 1')\nself.n = int(n)\nself.p = float(p)\nif data is None:\n self.n = self.n\n self.p = self.p\nelse:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.mean = sum(data) / len(data)\n sigma = 0\n i = 1\n for i in range(len(data)):\n sigma += (data[i] - self.mean) ** 2\n variance = 1 / len(data) * sigma\n self.stddev = variance ** (1 / 2)", "raiz = (2 * (self.π * self.stddev ** 2)) ** (1 / 2)\npotencia_1 = -(x - self.mean) ** 2\npotencia_2 = 2 * self.stddev ** 2\nreturn 1 / raiz * self.e ** (potencia_1 / potencia_2)", "x_func = (x - self.mean) / (self.stddev * 2 ** (1 / 2))\nerror_division = 2 / (self.π * (1 / 2))\npower_three = x_func ** 3 / 3\npower_five = x_func ** 5 / 10\npower_seven = x_func ** 7 / 42\npower_nine = x_func ** 9 / 216\nerror_function = error_division * (x_func - power_three + power_five - power_seven + power_nine)\nreturn 1 / 2 * (1 + error_function)"], "bodies_text": "<|body_start_0|>\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p < 0 or p > 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n if data is None:\n self.n = self.n\n self.p = self.p\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.mean = sum(data) / len(data)\n sigma = 0\n i = 1\n for i in range(len(data)):\n sigma += (data[i] - self.mean) ** 2\n variance = 1 / len(data) * sigma\n self.stddev = variance ** (1 / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n raiz = (2 * (self.π * self.stddev ** 2)) ** (1 / 2)\n potencia_1 = -(x - self.mean) ** 2\n potencia_2 = 2 * self.stddev ** 2\n return 1 / raiz * self.e ** (potencia_1 / potencia_2)\n<|end_body_1|>\n\n<|body_start_2|>\n x_func = (x - self.mean) / (self.stddev * 2 ** (1 / 2))\n error_division = 2 / (self.π * (1 / 2))\n power_three = x_func ** 3 / 3\n power_five = x_func ** 5 / 10\n power_seven = x_func ** 7 / 42\n power_nine = x_func ** 9 / 216\n error_function = error_division * (x_func - power_three + power_five - power_seven + power_nine)\n return 1 / 2 * (1 + error_function)\n<|end_body_2|>\n", "class_docstring": "represents a binomial distribution", "class_name": "Binomial", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Binomial:\n \"\"\"represents a binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Binomial contructor\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n <|body_1|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p < 0 or p > 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n if data is None:\n self.n = self.n\n self.p = self.p\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.mean = sum(data) / len(data)\n sigma = 0\n i = 1\n for i in range(len(data)):\n sigma += (data[i] - self.mean) ** 2\n variance = 1 / len(data) * sigma\n self.stddev = variance ** (1 / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n raiz = (2 * (self.π * self.stddev ** 2)) ** (1 / 2)\n potencia_1 = -(x - self.mean) ** 2\n potencia_2 = 2 * self.stddev ** 2\n return 1 / raiz * self.e ** (potencia_1 / potencia_2)\n<|end_body_1|>\n\n<|body_start_2|>\n x_func = (x - self.mean) / (self.stddev * 2 ** (1 / 2))\n error_division = 2 / (self.π * (1 / 2))\n power_three = x_func ** 3 / 3\n power_five = x_func ** 5 / 10\n power_seven = x_func ** 7 / 42\n power_nine = x_func ** 9 / 216\n error_function = error_division * (x_func - power_three + power_five - power_seven + power_nine)\n return 1 / 2 * (1 + error_function)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000135", "length_bytes": 1886, "license_type": "no_license", "methods": [{"docstring": "Binomial contructor", "name": "__init__", "signature": "def __init__(self, data=None, n=1, p=0.5)"}, {"docstring": "Calculates the value of the PMF for a given number", "name": "pmf", "signature": "def pmf(self, k)"}, {"docstring": "Calculates the value of the PMF for a given number", "name": "cdf", "signature": "def cdf(self, k)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_047795", "prompt": "Implement the Python class `Binomial` described below.\n\nClass description:\nrepresents a binomial distribution\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, n=1, p=0.5): Binomial contructor\n- def pmf(self, k): Calculates the value of the PMF for a given number\n- def cdf(self, k): Calculates the value of the PMF for a given number", "prompted_full_text": "Implement the Python class `Binomial` described below.\n\nClass description:\nrepresents a binomial distribution\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, n=1, p=0.5): Binomial contructor\n- def pmf(self, k): Calculates the value of the PMF for a given number\n- def cdf(self, k): Calculates the value of the PMF for a given number\n\n<|skeleton|>\nclass Binomial:\n \"\"\"represents a binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Binomial contructor\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n <|body_1|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p < 0 or p > 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n if data is None:\n self.n = self.n\n self.p = self.p\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.mean = sum(data) / len(data)\n sigma = 0\n i = 1\n for i in range(len(data)):\n sigma += (data[i] - self.mean) ** 2\n variance = 1 / len(data) * sigma\n self.stddev = variance ** (1 / 2)\n<|end_body_0|>\n\n<|body_start_1|>\n raiz = (2 * (self.π * self.stddev ** 2)) ** (1 / 2)\n potencia_1 = -(x - self.mean) ** 2\n potencia_2 = 2 * self.stddev ** 2\n return 1 / raiz * self.e ** (potencia_1 / potencia_2)\n<|end_body_1|>\n\n<|body_start_2|>\n x_func = (x - self.mean) / (self.stddev * 2 ** (1 / 2))\n error_division = 2 / (self.π * (1 / 2))\n power_three = x_func ** 3 / 3\n power_five = x_func ** 5 / 10\n power_seven = x_func ** 7 / 42\n power_nine = x_func ** 9 / 216\n error_function = error_division * (x_func - power_three + power_five - power_seven + power_nine)\n return 1 / 2 * (1 + error_function)\n<|end_body_2|>\n", "revision_id": "4adb0b69ab12ebeec08b1cf603e5c738378f6806", "skeleton": "<|skeleton|>\nclass Binomial:\n \"\"\"represents a binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Binomial contructor\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n <|body_1|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Binomial:\n \"\"\"represents a binomial distribution\"\"\"\n\n def __init__(self, data=None, n=1, p=0.5):\n \"\"\"Binomial contructor\"\"\"\n if n <= 0:\n raise ValueError('n must be a positive value')\n if p < 0 or p > 1:\n raise ValueError('p must be greater than 0 and less than 1')\n self.n = int(n)\n self.p = float(p)\n if data is None:\n self.n = self.n\n self.p = self.p\n else:\n if type(data) is not list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.mean = sum(data) / len(data)\n sigma = 0\n i = 1\n for i in range(len(data)):\n sigma += (data[i] - self.mean) ** 2\n variance = 1 / len(data) * sigma\n self.stddev = variance ** (1 / 2)\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n raiz = (2 * (self.π * self.stddev ** 2)) ** (1 / 2)\n potencia_1 = -(x - self.mean) ** 2\n potencia_2 = 2 * self.stddev ** 2\n return 1 / raiz * self.e ** (potencia_1 / potencia_2)\n\n def cdf(self, k):\n \"\"\"Calculates the value of the PMF for a given number\"\"\"\n x_func = (x - self.mean) / (self.stddev * 2 ** (1 / 2))\n error_division = 2 / (self.π * (1 / 2))\n power_three = x_func ** 3 / 3\n power_five = x_func ** 5 / 10\n power_seven = x_func ** 7 / 42\n power_nine = x_func ** 9 / 216\n error_function = error_division * (x_func - power_three + power_five - power_seven + power_nine)\n return 1 / 2 * (1 + error_function)\n", "source": "the_stack_v2_python_sparse", "source_path": "math/0x03-probability/binomial.py", "source_repo": "Jdavp/holbertonschool-machine_learning", "split": "test", "star_events_count": 0} {"blob_id": "f73da1eb3b5565db6063dc1d94c28bebbfb65a91", "bodies": ["plugin = SpotExtraction()\nexpected_scalar = self.diagnostic_cube_yx.aux_coords\nexpected_nonscalar = []\nx_indices, y_indices = self.coordinate_cube.data\nscalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_yx, x_indices, y_indices)\nself.assertArrayEqual(scalar, expected_scalar)\nself.assertArrayEqual(nonscalar, expected_nonscalar)", "plugin = SpotExtraction()\nexpected_scalar = [coord for coord in self.diagnostic_cube_2d_aux.aux_coords if coord.name() in ['time', 'forecast_reference_time', 'forecast_period']]\nexpected_nonscalar = [self.expected_spot_aux_coord]\nx_indices, y_indices = self.coordinate_cube.data\nscalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\nself.assertArrayEqual(scalar, expected_scalar)\nself.assertArrayEqual(nonscalar, expected_nonscalar)", "plugin = SpotExtraction()\nadditional_2d_crd = self.location_aux_coord.copy()\nadditional_2d_crd.rename('kittens')\nself.diagnostic_cube_2d_aux.add_aux_coord(additional_2d_crd, data_dims=(0, 1))\nadditional_expected = self.expected_spot_aux_coord.copy()\nadditional_expected.rename('kittens')\nexpected_nonscalar = [additional_expected, self.expected_spot_aux_coord]\nx_indices, y_indices = self.coordinate_cube.data\n_, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\nself.assertArrayEqual(nonscalar, expected_nonscalar)"], "bodies_text": "<|body_start_0|>\n plugin = SpotExtraction()\n expected_scalar = self.diagnostic_cube_yx.aux_coords\n expected_nonscalar = []\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_yx, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_0|>\n\n<|body_start_1|>\n plugin = SpotExtraction()\n expected_scalar = [coord for coord in self.diagnostic_cube_2d_aux.aux_coords if coord.name() in ['time', 'forecast_reference_time', 'forecast_period']]\n expected_nonscalar = [self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_1|>\n\n<|body_start_2|>\n plugin = SpotExtraction()\n additional_2d_crd = self.location_aux_coord.copy()\n additional_2d_crd.rename('kittens')\n self.diagnostic_cube_2d_aux.add_aux_coord(additional_2d_crd, data_dims=(0, 1))\n additional_expected = self.expected_spot_aux_coord.copy()\n additional_expected.rename('kittens')\n expected_nonscalar = [additional_expected, self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n _, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_2|>\n", "class_docstring": "Test the extraction of scalar and non-scalar auxiliary coordinates from a cube.", "class_name": "Test_get_aux_coords", "detected_licenses": ["BSD-3-Clause", "LicenseRef-scancode-proprietary-license"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test_get_aux_coords:\n \"\"\"Test the extraction of scalar and non-scalar auxiliary coordinates from a cube.\"\"\"\n\n def test_only_scalar_coords(self):\n \"\"\"Test with an input cube containing only scalar auxiliary coordinates.\"\"\"\n <|body_0|>\n\n def test_scalar_and_nonscalar_coords(self):\n \"\"\"Test with an input cube containing scalar and nonscalar auxiliary coordinates. The returned non-scalar coordinate is a 1D representation of the 2D non-scalar input coordinate at spot sites.\"\"\"\n <|body_1|>\n\n def test_multiple_nonscalar_coords(self):\n \"\"\"Test with an input cube containing multiple nonscalar auxiliary coordinates. The returned non-scalar coordinates are 1D representations of the 2D non-scalar input coordinates at spot sites.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plugin = SpotExtraction()\n expected_scalar = self.diagnostic_cube_yx.aux_coords\n expected_nonscalar = []\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_yx, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_0|>\n\n<|body_start_1|>\n plugin = SpotExtraction()\n expected_scalar = [coord for coord in self.diagnostic_cube_2d_aux.aux_coords if coord.name() in ['time', 'forecast_reference_time', 'forecast_period']]\n expected_nonscalar = [self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_1|>\n\n<|body_start_2|>\n plugin = SpotExtraction()\n additional_2d_crd = self.location_aux_coord.copy()\n additional_2d_crd.rename('kittens')\n self.diagnostic_cube_2d_aux.add_aux_coord(additional_2d_crd, data_dims=(0, 1))\n additional_expected = self.expected_spot_aux_coord.copy()\n additional_expected.rename('kittens')\n expected_nonscalar = [additional_expected, self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n _, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000136", "length_bytes": 21606, "license_type": "permissive", "methods": [{"docstring": "Test with an input cube containing only scalar auxiliary coordinates.", "name": "test_only_scalar_coords", "signature": "def test_only_scalar_coords(self)"}, {"docstring": "Test with an input cube containing scalar and nonscalar auxiliary coordinates. The returned non-scalar coordinate is a 1D representation of the 2D non-scalar input coordinate at spot sites.", "name": "test_scalar_and_nonscalar_coords", "signature": "def test_scalar_and_nonscalar_coords(self)"}, {"docstring": "Test with an input cube containing multiple nonscalar auxiliary coordinates. The returned non-scalar coordinates are 1D representations of the 2D non-scalar input coordinates at spot sites.", "name": "test_multiple_nonscalar_coords", "signature": "def test_multiple_nonscalar_coords(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_040404", "prompt": "Implement the Python class `Test_get_aux_coords` described below.\n\nClass description:\nTest the extraction of scalar and non-scalar auxiliary coordinates from a cube.\n\nMethod signatures and docstrings:\n- def test_only_scalar_coords(self): Test with an input cube containing only scalar auxiliary coordinates.\n- def test_scalar_and_nonscalar_coords(self): Test with an input cube containing scalar and nonscalar auxiliary coordinates. The returned non-scalar coordinate is a 1D representation of the 2D non-scalar input coordinate at spot sites.\n- def test_multiple_nonscalar_coords(self): Test with an input cube containing multiple nonscalar auxiliary coordinates. The returned non-scalar coordinates are 1D representations of the 2D non-scalar input coordinates at spot sites.", "prompted_full_text": "Implement the Python class `Test_get_aux_coords` described below.\n\nClass description:\nTest the extraction of scalar and non-scalar auxiliary coordinates from a cube.\n\nMethod signatures and docstrings:\n- def test_only_scalar_coords(self): Test with an input cube containing only scalar auxiliary coordinates.\n- def test_scalar_and_nonscalar_coords(self): Test with an input cube containing scalar and nonscalar auxiliary coordinates. The returned non-scalar coordinate is a 1D representation of the 2D non-scalar input coordinate at spot sites.\n- def test_multiple_nonscalar_coords(self): Test with an input cube containing multiple nonscalar auxiliary coordinates. The returned non-scalar coordinates are 1D representations of the 2D non-scalar input coordinates at spot sites.\n\n<|skeleton|>\nclass Test_get_aux_coords:\n \"\"\"Test the extraction of scalar and non-scalar auxiliary coordinates from a cube.\"\"\"\n\n def test_only_scalar_coords(self):\n \"\"\"Test with an input cube containing only scalar auxiliary coordinates.\"\"\"\n <|body_0|>\n\n def test_scalar_and_nonscalar_coords(self):\n \"\"\"Test with an input cube containing scalar and nonscalar auxiliary coordinates. The returned non-scalar coordinate is a 1D representation of the 2D non-scalar input coordinate at spot sites.\"\"\"\n <|body_1|>\n\n def test_multiple_nonscalar_coords(self):\n \"\"\"Test with an input cube containing multiple nonscalar auxiliary coordinates. The returned non-scalar coordinates are 1D representations of the 2D non-scalar input coordinates at spot sites.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plugin = SpotExtraction()\n expected_scalar = self.diagnostic_cube_yx.aux_coords\n expected_nonscalar = []\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_yx, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_0|>\n\n<|body_start_1|>\n plugin = SpotExtraction()\n expected_scalar = [coord for coord in self.diagnostic_cube_2d_aux.aux_coords if coord.name() in ['time', 'forecast_reference_time', 'forecast_period']]\n expected_nonscalar = [self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_1|>\n\n<|body_start_2|>\n plugin = SpotExtraction()\n additional_2d_crd = self.location_aux_coord.copy()\n additional_2d_crd.rename('kittens')\n self.diagnostic_cube_2d_aux.add_aux_coord(additional_2d_crd, data_dims=(0, 1))\n additional_expected = self.expected_spot_aux_coord.copy()\n additional_expected.rename('kittens')\n expected_nonscalar = [additional_expected, self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n _, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n<|end_body_2|>\n", "revision_id": "cd2c9019944345df1e703bf8f625db537ad9f559", "skeleton": "<|skeleton|>\nclass Test_get_aux_coords:\n \"\"\"Test the extraction of scalar and non-scalar auxiliary coordinates from a cube.\"\"\"\n\n def test_only_scalar_coords(self):\n \"\"\"Test with an input cube containing only scalar auxiliary coordinates.\"\"\"\n <|body_0|>\n\n def test_scalar_and_nonscalar_coords(self):\n \"\"\"Test with an input cube containing scalar and nonscalar auxiliary coordinates. The returned non-scalar coordinate is a 1D representation of the 2D non-scalar input coordinate at spot sites.\"\"\"\n <|body_1|>\n\n def test_multiple_nonscalar_coords(self):\n \"\"\"Test with an input cube containing multiple nonscalar auxiliary coordinates. The returned non-scalar coordinates are 1D representations of the 2D non-scalar input coordinates at spot sites.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Test_get_aux_coords:\n \"\"\"Test the extraction of scalar and non-scalar auxiliary coordinates from a cube.\"\"\"\n\n def test_only_scalar_coords(self):\n \"\"\"Test with an input cube containing only scalar auxiliary coordinates.\"\"\"\n plugin = SpotExtraction()\n expected_scalar = self.diagnostic_cube_yx.aux_coords\n expected_nonscalar = []\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_yx, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n\n def test_scalar_and_nonscalar_coords(self):\n \"\"\"Test with an input cube containing scalar and nonscalar auxiliary coordinates. The returned non-scalar coordinate is a 1D representation of the 2D non-scalar input coordinate at spot sites.\"\"\"\n plugin = SpotExtraction()\n expected_scalar = [coord for coord in self.diagnostic_cube_2d_aux.aux_coords if coord.name() in ['time', 'forecast_reference_time', 'forecast_period']]\n expected_nonscalar = [self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n scalar, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(scalar, expected_scalar)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n\n def test_multiple_nonscalar_coords(self):\n \"\"\"Test with an input cube containing multiple nonscalar auxiliary coordinates. The returned non-scalar coordinates are 1D representations of the 2D non-scalar input coordinates at spot sites.\"\"\"\n plugin = SpotExtraction()\n additional_2d_crd = self.location_aux_coord.copy()\n additional_2d_crd.rename('kittens')\n self.diagnostic_cube_2d_aux.add_aux_coord(additional_2d_crd, data_dims=(0, 1))\n additional_expected = self.expected_spot_aux_coord.copy()\n additional_expected.rename('kittens')\n expected_nonscalar = [additional_expected, self.expected_spot_aux_coord]\n x_indices, y_indices = self.coordinate_cube.data\n _, nonscalar = plugin.get_aux_coords(self.diagnostic_cube_2d_aux, x_indices, y_indices)\n self.assertArrayEqual(nonscalar, expected_nonscalar)\n", "source": "the_stack_v2_python_sparse", "source_path": "improver_tests/spotdata/test_SpotExtraction.py", "source_repo": "metoppv/improver", "split": "test", "star_events_count": 101} {"blob_id": "8fcdeb476537f3e9146f93f8f26d872a0f5910e7", "bodies": ["def similarity(hex1, hex2):\n r1, g1, b1 = (hex1 >> 16, (hex1 >> 8) % 256, hex1 % 256)\n r2, g2, b2 = (hex2 >> 16, (hex2 >> 8) % 256, hex2 % 256)\n return -(r1 - r2) ** 2 - (g1 - g2) ** 2 - (b1 - b2) ** 2\nhex1 = int(color[1:], 16)\nans = 0\nfor r in range(16):\n for g in range(16):\n for b in range(16):\n hex2 = 17 * r * (1 << 16) + 17 * g * (1 << 8) + 17 * b\n if similarity(hex1, hex2) > similarity(hex1, ans):\n ans = hex2\nreturn '#{:06x}'.format(ans)", "def f(comp):\n q, r = divmod(int(comp, 16), 17)\n if r > 8:\n q += 1\n return '{:02x}'.format(17 * q)\nreturn '#' + f(color[1:3]) + f(color[3:5]) + f(color[5:])"], "bodies_text": "<|body_start_0|>\n def similarity(hex1, hex2):\n r1, g1, b1 = (hex1 >> 16, (hex1 >> 8) % 256, hex1 % 256)\n r2, g2, b2 = (hex2 >> 16, (hex2 >> 8) % 256, hex2 % 256)\n return -(r1 - r2) ** 2 - (g1 - g2) ** 2 - (b1 - b2) ** 2\n hex1 = int(color[1:], 16)\n ans = 0\n for r in range(16):\n for g in range(16):\n for b in range(16):\n hex2 = 17 * r * (1 << 16) + 17 * g * (1 << 8) + 17 * b\n if similarity(hex1, hex2) > similarity(hex1, ans):\n ans = hex2\n return '#{:06x}'.format(ans)\n<|end_body_0|>\n\n<|body_start_1|>\n def f(comp):\n q, r = divmod(int(comp, 16), 17)\n if r > 8:\n q += 1\n return '{:02x}'.format(17 * q)\n return '#' + f(color[1:3]) + f(color[3:5]) + f(color[5:])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def similarRGB(self, color: str) -> str:\n \"\"\"For each possible shorthand-RGB color from \"#000\" to \"#fff\", let's find it's similarity to the given color. We'll take the best one. To iterate over each shorthand color, we'll use an integer based approach, (though other ones exist.) Each digit in the shorthand \"#RGB\" could be from 0 to 15. This leads to a color of 17 * R * (1 << 16) + 17 * G * (1 << 8) + 17 * B. The reason for the 17 is because a hexadecimal value of 0x22 is equal to 2 * 16 + 2 * 1 which is 2 * (17). The other values for red and green work similarly, just shifted up by 8 or 16 bits. To determine the similarity between two colors represented as integers, we'll sum the similarity of each of their colored components separatel\"\"\"\n <|body_0|>\n\n def similarRGB_1(self, color: str) -> str:\n \"\"\"Because color similarity is a sum of the similarity of individual color components, we can treat each colored component separately and combine the answer. As in the previous approach, we want the colored component to be the closest to a multiple of 17. We can just round it to the closest such value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def similarity(hex1, hex2):\n r1, g1, b1 = (hex1 >> 16, (hex1 >> 8) % 256, hex1 % 256)\n r2, g2, b2 = (hex2 >> 16, (hex2 >> 8) % 256, hex2 % 256)\n return -(r1 - r2) ** 2 - (g1 - g2) ** 2 - (b1 - b2) ** 2\n hex1 = int(color[1:], 16)\n ans = 0\n for r in range(16):\n for g in range(16):\n for b in range(16):\n hex2 = 17 * r * (1 << 16) + 17 * g * (1 << 8) + 17 * b\n if similarity(hex1, hex2) > similarity(hex1, ans):\n ans = hex2\n return '#{:06x}'.format(ans)\n<|end_body_0|>\n\n<|body_start_1|>\n def f(comp):\n q, r = divmod(int(comp, 16), 17)\n if r > 8:\n q += 1\n return '{:02x}'.format(17 * q)\n return '#' + f(color[1:3]) + f(color[3:5]) + f(color[5:])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000137", "length_bytes": 4163, "license_type": "no_license", "methods": [{"docstring": "For each possible shorthand-RGB color from \"#000\" to \"#fff\", let's find it's similarity to the given color. We'll take the best one. To iterate over each shorthand color, we'll use an integer based approach, (though other ones exist.) Each digit in the shorthand \"#RGB\" could be from 0 to 15. This leads to a color of 17 * R * (1 << 16) + 17 * G * (1 << 8) + 17 * B. The reason for the 17 is because a hexadecimal value of 0x22 is equal to 2 * 16 + 2 * 1 which is 2 * (17). The other values for red and green work similarly, just shifted up by 8 or 16 bits. To determine the similarity between two colors represented as integers, we'll sum the similarity of each of their colored components separatel", "name": "similarRGB", "signature": "def similarRGB(self, color: str) -> str"}, {"docstring": "Because color similarity is a sum of the similarity of individual color components, we can treat each colored component separately and combine the answer. As in the previous approach, we want the colored component to be the closest to a multiple of 17. We can just round it to the closest such value.", "name": "similarRGB_1", "signature": "def similarRGB_1(self, color: str) -> str"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005482", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def similarRGB(self, color: str) -> str: For each possible shorthand-RGB color from \"#000\" to \"#fff\", let's find it's similarity to the given color. We'll take the best one. To iterate over each shorthand color, we'll use an integer based approach, (though other ones exist.) Each digit in the shorthand \"#RGB\" could be from 0 to 15. This leads to a color of 17 * R * (1 << 16) + 17 * G * (1 << 8) + 17 * B. The reason for the 17 is because a hexadecimal value of 0x22 is equal to 2 * 16 + 2 * 1 which is 2 * (17). The other values for red and green work similarly, just shifted up by 8 or 16 bits. To determine the similarity between two colors represented as integers, we'll sum the similarity of each of their colored components separatel\n- def similarRGB_1(self, color: str) -> str: Because color similarity is a sum of the similarity of individual color components, we can treat each colored component separately and combine the answer. As in the previous approach, we want the colored component to be the closest to a multiple of 17. We can just round it to the closest such value.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def similarRGB(self, color: str) -> str: For each possible shorthand-RGB color from \"#000\" to \"#fff\", let's find it's similarity to the given color. We'll take the best one. To iterate over each shorthand color, we'll use an integer based approach, (though other ones exist.) Each digit in the shorthand \"#RGB\" could be from 0 to 15. This leads to a color of 17 * R * (1 << 16) + 17 * G * (1 << 8) + 17 * B. The reason for the 17 is because a hexadecimal value of 0x22 is equal to 2 * 16 + 2 * 1 which is 2 * (17). The other values for red and green work similarly, just shifted up by 8 or 16 bits. To determine the similarity between two colors represented as integers, we'll sum the similarity of each of their colored components separatel\n- def similarRGB_1(self, color: str) -> str: Because color similarity is a sum of the similarity of individual color components, we can treat each colored component separately and combine the answer. As in the previous approach, we want the colored component to be the closest to a multiple of 17. We can just round it to the closest such value.\n\n<|skeleton|>\nclass Solution:\n\n def similarRGB(self, color: str) -> str:\n \"\"\"For each possible shorthand-RGB color from \"#000\" to \"#fff\", let's find it's similarity to the given color. We'll take the best one. To iterate over each shorthand color, we'll use an integer based approach, (though other ones exist.) Each digit in the shorthand \"#RGB\" could be from 0 to 15. This leads to a color of 17 * R * (1 << 16) + 17 * G * (1 << 8) + 17 * B. The reason for the 17 is because a hexadecimal value of 0x22 is equal to 2 * 16 + 2 * 1 which is 2 * (17). The other values for red and green work similarly, just shifted up by 8 or 16 bits. To determine the similarity between two colors represented as integers, we'll sum the similarity of each of their colored components separatel\"\"\"\n <|body_0|>\n\n def similarRGB_1(self, color: str) -> str:\n \"\"\"Because color similarity is a sum of the similarity of individual color components, we can treat each colored component separately and combine the answer. As in the previous approach, we want the colored component to be the closest to a multiple of 17. We can just round it to the closest such value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def similarity(hex1, hex2):\n r1, g1, b1 = (hex1 >> 16, (hex1 >> 8) % 256, hex1 % 256)\n r2, g2, b2 = (hex2 >> 16, (hex2 >> 8) % 256, hex2 % 256)\n return -(r1 - r2) ** 2 - (g1 - g2) ** 2 - (b1 - b2) ** 2\n hex1 = int(color[1:], 16)\n ans = 0\n for r in range(16):\n for g in range(16):\n for b in range(16):\n hex2 = 17 * r * (1 << 16) + 17 * g * (1 << 8) + 17 * b\n if similarity(hex1, hex2) > similarity(hex1, ans):\n ans = hex2\n return '#{:06x}'.format(ans)\n<|end_body_0|>\n\n<|body_start_1|>\n def f(comp):\n q, r = divmod(int(comp, 16), 17)\n if r > 8:\n q += 1\n return '{:02x}'.format(17 * q)\n return '#' + f(color[1:3]) + f(color[3:5]) + f(color[5:])\n<|end_body_1|>\n", "revision_id": "727dec2e23e765925a5e7e003fc99aeaf25111e9", "skeleton": "<|skeleton|>\nclass Solution:\n\n def similarRGB(self, color: str) -> str:\n \"\"\"For each possible shorthand-RGB color from \"#000\" to \"#fff\", let's find it's similarity to the given color. We'll take the best one. To iterate over each shorthand color, we'll use an integer based approach, (though other ones exist.) Each digit in the shorthand \"#RGB\" could be from 0 to 15. This leads to a color of 17 * R * (1 << 16) + 17 * G * (1 << 8) + 17 * B. The reason for the 17 is because a hexadecimal value of 0x22 is equal to 2 * 16 + 2 * 1 which is 2 * (17). The other values for red and green work similarly, just shifted up by 8 or 16 bits. To determine the similarity between two colors represented as integers, we'll sum the similarity of each of their colored components separatel\"\"\"\n <|body_0|>\n\n def similarRGB_1(self, color: str) -> str:\n \"\"\"Because color similarity is a sum of the similarity of individual color components, we can treat each colored component separately and combine the answer. As in the previous approach, we want the colored component to be the closest to a multiple of 17. We can just round it to the closest such value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def similarRGB(self, color: str) -> str:\n \"\"\"For each possible shorthand-RGB color from \"#000\" to \"#fff\", let's find it's similarity to the given color. We'll take the best one. To iterate over each shorthand color, we'll use an integer based approach, (though other ones exist.) Each digit in the shorthand \"#RGB\" could be from 0 to 15. This leads to a color of 17 * R * (1 << 16) + 17 * G * (1 << 8) + 17 * B. The reason for the 17 is because a hexadecimal value of 0x22 is equal to 2 * 16 + 2 * 1 which is 2 * (17). The other values for red and green work similarly, just shifted up by 8 or 16 bits. To determine the similarity between two colors represented as integers, we'll sum the similarity of each of their colored components separatel\"\"\"\n def similarity(hex1, hex2):\n r1, g1, b1 = (hex1 >> 16, (hex1 >> 8) % 256, hex1 % 256)\n r2, g2, b2 = (hex2 >> 16, (hex2 >> 8) % 256, hex2 % 256)\n return -(r1 - r2) ** 2 - (g1 - g2) ** 2 - (b1 - b2) ** 2\n hex1 = int(color[1:], 16)\n ans = 0\n for r in range(16):\n for g in range(16):\n for b in range(16):\n hex2 = 17 * r * (1 << 16) + 17 * g * (1 << 8) + 17 * b\n if similarity(hex1, hex2) > similarity(hex1, ans):\n ans = hex2\n return '#{:06x}'.format(ans)\n\n def similarRGB_1(self, color: str) -> str:\n \"\"\"Because color similarity is a sum of the similarity of individual color components, we can treat each colored component separately and combine the answer. As in the previous approach, we want the colored component to be the closest to a multiple of 17. We can just round it to the closest such value.\"\"\"\n def f(comp):\n q, r = divmod(int(comp, 16), 17)\n if r > 8:\n q += 1\n return '{:02x}'.format(17 * q)\n return '#' + f(color[1:3]) + f(color[3:5]) + f(color[5:])\n", "source": "the_stack_v2_python_sparse", "source_path": "funNLearn/src/main/java/dsAlgo/leetcode/P8xx/P800_SimilarRGBColor.py", "source_repo": "vishalpmittal/practice-fun", "split": "test", "star_events_count": 0} {"blob_id": "283c9361e1493c68fdc7999c0416a3010c7e9ff3", "bodies": ["warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\niclass = SearchIterator\nif kw.get('return_csv', False):\n iclass = CSVChunkIterator\nreturn super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/webapp/search', **kw)", "warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\niclass = SearchIterator\nif kw.get('return_csv', False):\n iclass = CSVChunkIterator\nreturn super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/cloud_resource/search', **kw)", "warnings.warn('Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead.')\niclass = SearchIterator\nif kw.get('return_csv', False):\n iclass = CSVChunkIterator\nreturn super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/host/search', **kw)"], "bodies_text": "<|body_start_0|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/webapp/search', **kw)\n<|end_body_0|>\n\n<|body_start_1|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/cloud_resource/search', **kw)\n<|end_body_1|>\n\n<|body_start_2|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/host/search', **kw)\n<|end_body_2|>\n", "class_docstring": "API class containing all the methods related to Findings. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use `tio.exports.vulns()`, which is the equivalent V2 API for `search_host()`.", "class_name": "FindingsAPI", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FindingsAPI:\n \"\"\"API class containing all the methods related to Findings. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use `tio.exports.vulns()`, which is the equivalent V2 API for `search_host()`.\"\"\"\n\n def search_webapp(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the WAS Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ... ] ... }],\"\"\"\n <|body_0|>\n\n def search_cloud_resource(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Cloud Resource Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ...\"\"\"\n <|body_1|>\n\n def search_host(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Host Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator':\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/webapp/search', **kw)\n<|end_body_0|>\n\n<|body_start_1|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/cloud_resource/search', **kw)\n<|end_body_1|>\n\n<|body_start_2|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/host/search', **kw)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000138", "length_bytes": 12269, "license_type": "permissive", "methods": [{"docstring": "Search and retrieve the WAS Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ... ] ... }],", "name": "search_webapp", "signature": "def search_webapp(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]"}, {"docstring": "Search and retrieve the Cloud Resource Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ...", "name": "search_cloud_resource", "signature": "def search_cloud_resource(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]"}, {"docstring": "Search and retrieve the Host Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator':", "name": "search_host", "signature": "def search_host(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_016704", "prompt": "Implement the Python class `FindingsAPI` described below.\n\nClass description:\nAPI class containing all the methods related to Findings. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use `tio.exports.vulns()`, which is the equivalent V2 API for `search_host()`.\n\nMethod signatures and docstrings:\n- def search_webapp(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]: Search and retrieve the WAS Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ... ] ... }],\n- def search_cloud_resource(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]: Search and retrieve the Cloud Resource Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ...\n- def search_host(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]: Search and retrieve the Host Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator':", "prompted_full_text": "Implement the Python class `FindingsAPI` described below.\n\nClass description:\nAPI class containing all the methods related to Findings. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use `tio.exports.vulns()`, which is the equivalent V2 API for `search_host()`.\n\nMethod signatures and docstrings:\n- def search_webapp(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]: Search and retrieve the WAS Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ... ] ... }],\n- def search_cloud_resource(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]: Search and retrieve the Cloud Resource Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ...\n- def search_host(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]: Search and retrieve the Host Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator':\n\n<|skeleton|>\nclass FindingsAPI:\n \"\"\"API class containing all the methods related to Findings. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use `tio.exports.vulns()`, which is the equivalent V2 API for `search_host()`.\"\"\"\n\n def search_webapp(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the WAS Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ... ] ... }],\"\"\"\n <|body_0|>\n\n def search_cloud_resource(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Cloud Resource Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ...\"\"\"\n <|body_1|>\n\n def search_host(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Host Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator':\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/webapp/search', **kw)\n<|end_body_0|>\n\n<|body_start_1|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/cloud_resource/search', **kw)\n<|end_body_1|>\n\n<|body_start_2|>\n warnings.warn('Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/host/search', **kw)\n<|end_body_2|>\n", "revision_id": "4e31049891f55016168b14ae30d332a965523640", "skeleton": "<|skeleton|>\nclass FindingsAPI:\n \"\"\"API class containing all the methods related to Findings. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use `tio.exports.vulns()`, which is the equivalent V2 API for `search_host()`.\"\"\"\n\n def search_webapp(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the WAS Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ... ] ... }],\"\"\"\n <|body_0|>\n\n def search_cloud_resource(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Cloud Resource Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ...\"\"\"\n <|body_1|>\n\n def search_host(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Host Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator':\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FindingsAPI:\n \"\"\"API class containing all the methods related to Findings. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use `tio.exports.vulns()`, which is the equivalent V2 API for `search_host()`.\"\"\"\n\n def search_webapp(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the WAS Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ... ] ... }],\"\"\"\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/webapp/search', **kw)\n\n def search_cloud_resource(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Cloud Resource Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated and must no longer be used. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator': 'oper', ... 'property': '2' ... } ...\"\"\"\n warnings.warn('Tenable.io Findings V3 APIs are deprecated and must no longer be used.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/cloud_resource/search', **kw)\n\n def search_host(self, **kw) -> Union[SearchIterator, CSVChunkIterator, Response]:\n \"\"\"Search and retrieve the Host Vulnerabilities based on supported conditions. Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead. Args: fields (list, optional): The list of field names to return from the Tenable API. Example: >>> ['field1', 'field2'] filter (tuple, dict, optional): A nestable filter object detailing how to filter the results down to the desired subset. Examples: >>> ('or', ('and', ('test', 'oper', '1'), ... ('test', 'oper', '2') ... ), ... 'and', ('test', 'oper', 3) ... ) >>> { ... 'or': [{ ... 'and': [{ ... 'value': '1', ... 'operator': 'oper', ... 'property': '1' ... }, ... { ... 'value': '2', ... 'operator':\"\"\"\n warnings.warn('Tenable.io Findings V3 APIs are deprecated. Tenable recommends that you use the `tio.exports.vulns()` method instead.')\n iclass = SearchIterator\n if kw.get('return_csv', False):\n iclass = CSVChunkIterator\n return super()._search(iterator_cls=iclass, sort_type=self._sort_type.property_based, resource='findings', api_path=f'{self._path}/host/search', **kw)\n", "source": "the_stack_v2_python_sparse", "source_path": "tenable/io/v3/explore/findings/api.py", "source_repo": "tenable/pyTenable", "split": "test", "star_events_count": 300} {"blob_id": "7f91ea3c98907597d5448f682966786f1d91b46c", "bodies": ["super().__init__(cookie)\nsymbols, count = self._parse_cookie()\nself._symbols = symbols\nself._count = count", "symbols = re.split('([=:&])', self._value)\nlast = (len(symbols) - 3) // 4\ncount = last + 1\nreturn (symbols, count)", "replacement = copy(self._symbols)\ncurrent = index * 4 + 2\nreplacement[current] = payload\nif self._quoted:\n return '\"' + ''.join(replacement) + '\"'\nelse:\n return ''.join(replacement)", "yield next(super().replace(payload))\nfor i in range(self._count):\n yield self._replace_at(i, payload)"], "bodies_text": "<|body_start_0|>\n super().__init__(cookie)\n symbols, count = self._parse_cookie()\n self._symbols = symbols\n self._count = count\n<|end_body_0|>\n\n<|body_start_1|>\n symbols = re.split('([=:&])', self._value)\n last = (len(symbols) - 3) // 4\n count = last + 1\n return (symbols, count)\n<|end_body_1|>\n\n<|body_start_2|>\n replacement = copy(self._symbols)\n current = index * 4 + 2\n replacement[current] = payload\n if self._quoted:\n return '\"' + ''.join(replacement) + '\"'\n else:\n return ''.join(replacement)\n<|end_body_2|>\n\n<|body_start_3|>\n yield next(super().replace(payload))\n for i in range(self._count):\n yield self._replace_at(i, payload)\n<|end_body_3|>\n", "class_docstring": "This is for complex cookie. They are cookie strings that contain list of key/value pairs delimited by &, :, and =. String are decomposed into key/value pairs. Values can be replaced with payloads within a re-created cookie string.", "class_name": "ComplexCookie", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ComplexCookie:\n \"\"\"This is for complex cookie. They are cookie strings that contain list of key/value pairs delimited by &, :, and =. String are decomposed into key/value pairs. Values can be replaced with payloads within a re-created cookie string.\"\"\"\n\n def __init__(self, cookie):\n \"\"\"Sets the string, parses the cookie into tokens, and sets the value count.\"\"\"\n <|body_0|>\n\n def _parse_cookie(self):\n \"\"\"Parse the cookie into a set of tokens to specify key/value pairs and delimiters. Cookie strings are decomposed by =, then :, then & into symbols. Key/value pairs can be delimited by = or :. Lists of key/value pairs can be delimited by & or :. :return: list of symbols and count\"\"\"\n <|body_1|>\n\n def _replace_at(self, index, payload):\n \"\"\"Replace value at specified index with given payload. This is used by replace_values() to replace values individually. :param index: value index as integer :param payload: payload string :return: replacement string\"\"\"\n <|body_2|>\n\n def replace(self, payload):\n \"\"\"Replace each value of key/value pairs within the cookie string with given payload. Values are replaced individually and returned as a list of replacements. :param payload: payload string :return: list of replacements\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(cookie)\n symbols, count = self._parse_cookie()\n self._symbols = symbols\n self._count = count\n<|end_body_0|>\n\n<|body_start_1|>\n symbols = re.split('([=:&])', self._value)\n last = (len(symbols) - 3) // 4\n count = last + 1\n return (symbols, count)\n<|end_body_1|>\n\n<|body_start_2|>\n replacement = copy(self._symbols)\n current = index * 4 + 2\n replacement[current] = payload\n if self._quoted:\n return '\"' + ''.join(replacement) + '\"'\n else:\n return ''.join(replacement)\n<|end_body_2|>\n\n<|body_start_3|>\n yield next(super().replace(payload))\n for i in range(self._count):\n yield self._replace_at(i, payload)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000139", "length_bytes": 3236, "license_type": "permissive", "methods": [{"docstring": "Sets the string, parses the cookie into tokens, and sets the value count.", "name": "__init__", "signature": "def __init__(self, cookie)"}, {"docstring": "Parse the cookie into a set of tokens to specify key/value pairs and delimiters. Cookie strings are decomposed by =, then :, then & into symbols. Key/value pairs can be delimited by = or :. Lists of key/value pairs can be delimited by & or :. :return: list of symbols and count", "name": "_parse_cookie", "signature": "def _parse_cookie(self)"}, {"docstring": "Replace value at specified index with given payload. This is used by replace_values() to replace values individually. :param index: value index as integer :param payload: payload string :return: replacement string", "name": "_replace_at", "signature": "def _replace_at(self, index, payload)"}, {"docstring": "Replace each value of key/value pairs within the cookie string with given payload. Values are replaced individually and returned as a list of replacements. :param payload: payload string :return: list of replacements", "name": "replace", "signature": "def replace(self, payload)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_001569", "prompt": "Implement the Python class `ComplexCookie` described below.\n\nClass description:\nThis is for complex cookie. They are cookie strings that contain list of key/value pairs delimited by &, :, and =. String are decomposed into key/value pairs. Values can be replaced with payloads within a re-created cookie string.\n\nMethod signatures and docstrings:\n- def __init__(self, cookie): Sets the string, parses the cookie into tokens, and sets the value count.\n- def _parse_cookie(self): Parse the cookie into a set of tokens to specify key/value pairs and delimiters. Cookie strings are decomposed by =, then :, then & into symbols. Key/value pairs can be delimited by = or :. Lists of key/value pairs can be delimited by & or :. :return: list of symbols and count\n- def _replace_at(self, index, payload): Replace value at specified index with given payload. This is used by replace_values() to replace values individually. :param index: value index as integer :param payload: payload string :return: replacement string\n- def replace(self, payload): Replace each value of key/value pairs within the cookie string with given payload. Values are replaced individually and returned as a list of replacements. :param payload: payload string :return: list of replacements", "prompted_full_text": "Implement the Python class `ComplexCookie` described below.\n\nClass description:\nThis is for complex cookie. They are cookie strings that contain list of key/value pairs delimited by &, :, and =. String are decomposed into key/value pairs. Values can be replaced with payloads within a re-created cookie string.\n\nMethod signatures and docstrings:\n- def __init__(self, cookie): Sets the string, parses the cookie into tokens, and sets the value count.\n- def _parse_cookie(self): Parse the cookie into a set of tokens to specify key/value pairs and delimiters. Cookie strings are decomposed by =, then :, then & into symbols. Key/value pairs can be delimited by = or :. Lists of key/value pairs can be delimited by & or :. :return: list of symbols and count\n- def _replace_at(self, index, payload): Replace value at specified index with given payload. This is used by replace_values() to replace values individually. :param index: value index as integer :param payload: payload string :return: replacement string\n- def replace(self, payload): Replace each value of key/value pairs within the cookie string with given payload. Values are replaced individually and returned as a list of replacements. :param payload: payload string :return: list of replacements\n\n<|skeleton|>\nclass ComplexCookie:\n \"\"\"This is for complex cookie. They are cookie strings that contain list of key/value pairs delimited by &, :, and =. String are decomposed into key/value pairs. Values can be replaced with payloads within a re-created cookie string.\"\"\"\n\n def __init__(self, cookie):\n \"\"\"Sets the string, parses the cookie into tokens, and sets the value count.\"\"\"\n <|body_0|>\n\n def _parse_cookie(self):\n \"\"\"Parse the cookie into a set of tokens to specify key/value pairs and delimiters. Cookie strings are decomposed by =, then :, then & into symbols. Key/value pairs can be delimited by = or :. Lists of key/value pairs can be delimited by & or :. :return: list of symbols and count\"\"\"\n <|body_1|>\n\n def _replace_at(self, index, payload):\n \"\"\"Replace value at specified index with given payload. This is used by replace_values() to replace values individually. :param index: value index as integer :param payload: payload string :return: replacement string\"\"\"\n <|body_2|>\n\n def replace(self, payload):\n \"\"\"Replace each value of key/value pairs within the cookie string with given payload. Values are replaced individually and returned as a list of replacements. :param payload: payload string :return: list of replacements\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(cookie)\n symbols, count = self._parse_cookie()\n self._symbols = symbols\n self._count = count\n<|end_body_0|>\n\n<|body_start_1|>\n symbols = re.split('([=:&])', self._value)\n last = (len(symbols) - 3) // 4\n count = last + 1\n return (symbols, count)\n<|end_body_1|>\n\n<|body_start_2|>\n replacement = copy(self._symbols)\n current = index * 4 + 2\n replacement[current] = payload\n if self._quoted:\n return '\"' + ''.join(replacement) + '\"'\n else:\n return ''.join(replacement)\n<|end_body_2|>\n\n<|body_start_3|>\n yield next(super().replace(payload))\n for i in range(self._count):\n yield self._replace_at(i, payload)\n<|end_body_3|>\n", "revision_id": "4483b301034a096b716646a470a6642b3df8ce61", "skeleton": "<|skeleton|>\nclass ComplexCookie:\n \"\"\"This is for complex cookie. They are cookie strings that contain list of key/value pairs delimited by &, :, and =. String are decomposed into key/value pairs. Values can be replaced with payloads within a re-created cookie string.\"\"\"\n\n def __init__(self, cookie):\n \"\"\"Sets the string, parses the cookie into tokens, and sets the value count.\"\"\"\n <|body_0|>\n\n def _parse_cookie(self):\n \"\"\"Parse the cookie into a set of tokens to specify key/value pairs and delimiters. Cookie strings are decomposed by =, then :, then & into symbols. Key/value pairs can be delimited by = or :. Lists of key/value pairs can be delimited by & or :. :return: list of symbols and count\"\"\"\n <|body_1|>\n\n def _replace_at(self, index, payload):\n \"\"\"Replace value at specified index with given payload. This is used by replace_values() to replace values individually. :param index: value index as integer :param payload: payload string :return: replacement string\"\"\"\n <|body_2|>\n\n def replace(self, payload):\n \"\"\"Replace each value of key/value pairs within the cookie string with given payload. Values are replaced individually and returned as a list of replacements. :param payload: payload string :return: list of replacements\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ComplexCookie:\n \"\"\"This is for complex cookie. They are cookie strings that contain list of key/value pairs delimited by &, :, and =. String are decomposed into key/value pairs. Values can be replaced with payloads within a re-created cookie string.\"\"\"\n\n def __init__(self, cookie):\n \"\"\"Sets the string, parses the cookie into tokens, and sets the value count.\"\"\"\n super().__init__(cookie)\n symbols, count = self._parse_cookie()\n self._symbols = symbols\n self._count = count\n\n def _parse_cookie(self):\n \"\"\"Parse the cookie into a set of tokens to specify key/value pairs and delimiters. Cookie strings are decomposed by =, then :, then & into symbols. Key/value pairs can be delimited by = or :. Lists of key/value pairs can be delimited by & or :. :return: list of symbols and count\"\"\"\n symbols = re.split('([=:&])', self._value)\n last = (len(symbols) - 3) // 4\n count = last + 1\n return (symbols, count)\n\n def _replace_at(self, index, payload):\n \"\"\"Replace value at specified index with given payload. This is used by replace_values() to replace values individually. :param index: value index as integer :param payload: payload string :return: replacement string\"\"\"\n replacement = copy(self._symbols)\n current = index * 4 + 2\n replacement[current] = payload\n if self._quoted:\n return '\"' + ''.join(replacement) + '\"'\n else:\n return ''.join(replacement)\n\n def replace(self, payload):\n \"\"\"Replace each value of key/value pairs within the cookie string with given payload. Values are replaced individually and returned as a list of replacements. :param payload: payload string :return: list of replacements\"\"\"\n yield next(super().replace(payload))\n for i in range(self._count):\n yield self._replace_at(i, payload)\n", "source": "the_stack_v2_python_sparse", "source_path": "ava/parsers/cookie.py", "source_repo": "indeedsecurity/ava-ce", "split": "test", "star_events_count": 3} {"blob_id": "968a43104484f5c7ae6d842780caceebfd4563e8", "bodies": ["super(Yandex, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\nself.api_key = api_key\nself.lang = lang\ndomain = 'geocode-maps.yandex.ru'\nself.api = '%s://%s%s' % (self.scheme, domain, self.api_path)", "params = {'geocode': self.format_string % query, 'format': 'json'}\nif self.api_key:\n params['apikey'] = self.api_key\nif self.lang:\n params['lang'] = self.lang\nif exactly_one:\n params['results'] = 1\nurl = '?'.join((self.api, urlencode(params)))\nlogger.debug('%s.geocode: %s', self.__class__.__name__, url)\nreturn self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)", "if exactly_one is DEFAULT_SENTINEL:\n warnings.warn('%s.reverse: default value for `exactly_one` argument will become True in geopy 2.0. Specify `exactly_one=False` as the argument explicitly to get rid of this warning.' % type(self).__name__, DeprecationWarning, stacklevel=2)\n exactly_one = False\ntry:\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\nexcept ValueError:\n raise ValueError('Must be a coordinate pair or Point')\nparams = {'geocode': point, 'format': 'json'}\nif self.api_key:\n params['apikey'] = self.api_key\nif self.lang:\n params['lang'] = self.lang\nif kind:\n params['kind'] = kind\nurl = '?'.join((self.api, urlencode(params)))\nlogger.debug('%s.reverse: %s', self.__class__.__name__, url)\nreturn self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)", "if doc.get('error'):\n raise GeocoderServiceError(doc['error']['message'])\ntry:\n places = doc['response']['GeoObjectCollection']['featureMember']\nexcept KeyError:\n raise GeocoderParseError('Failed to parse server response')\n\ndef parse_code(place):\n \"\"\"\n Parse each record.\n \"\"\"\n try:\n place = place['GeoObject']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n longitude, latitude = [float(_) for _ in place['Point']['pos'].split(' ')]\n name_elements = ['name', 'description']\n location = ', '.join([place[k] for k in name_elements if place.get(k)])\n return Location(location, (latitude, longitude), place)\nif exactly_one:\n try:\n return parse_code(places[0])\n except IndexError:\n return None\nelse:\n return [parse_code(place) for place in places]"], "bodies_text": "<|body_start_0|>\n super(Yandex, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.lang = lang\n domain = 'geocode-maps.yandex.ru'\n self.api = '%s://%s%s' % (self.scheme, domain, self.api_path)\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'geocode': self.format_string % query, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if exactly_one:\n params['results'] = 1\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_1|>\n\n<|body_start_2|>\n if exactly_one is DEFAULT_SENTINEL:\n warnings.warn('%s.reverse: default value for `exactly_one` argument will become True in geopy 2.0. Specify `exactly_one=False` as the argument explicitly to get rid of this warning.' % type(self).__name__, DeprecationWarning, stacklevel=2)\n exactly_one = False\n try:\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n except ValueError:\n raise ValueError('Must be a coordinate pair or Point')\n params = {'geocode': point, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if kind:\n params['kind'] = kind\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_2|>\n\n<|body_start_3|>\n if doc.get('error'):\n raise GeocoderServiceError(doc['error']['message'])\n try:\n places = doc['response']['GeoObjectCollection']['featureMember']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n\n def parse_code(place):\n \"\"\"\n Parse each record.\n \"\"\"\n try:\n place = place['GeoObject']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n longitude, latitude = [float(_) for _ in place['Point']['pos'].split(' ')]\n name_elements = ['name', 'description']\n location = ', '.join([place[k] for k in name_elements if place.get(k)])\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n try:\n return parse_code(places[0])\n except IndexError:\n return None\n else:\n return [parse_code(place) for place in places]\n<|end_body_3|>\n", "class_docstring": "Yandex geocoder. Documentation at: https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/ .. versionadded:: 1.5.0", "class_name": "Yandex", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Yandex:\n \"\"\"Yandex geocoder. Documentation at: https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/ .. versionadded:: 1.5.0\"\"\"\n\n def __init__(self, api_key=None, lang=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, scheme=None, format_string=None, ssl_context=DEFAULT_SENTINEL):\n \"\"\".. versionchanged:: 1.14.0 Default scheme has been changed from ``http`` to ``https``. :param str api_key: Yandex API key (not obligatory) https://tech.yandex.ru/maps/keys/get/ :param str lang: response locale, the following locales are supported: ``\"ru_RU\"`` (default), ``\"uk_UA\"``, ``\"be_BY\"``, ``\"en_US\"``, ``\"tr_TR\"``. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. .. versionadded:: 1.12.0 :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. .. versionadded:: 1.14.0 :param str format_string: See\"\"\"\n <|body_0|>\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n <|body_1|>\n\n def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond befo\"\"\"\n <|body_2|>\n\n def _parse_json(self, doc, exactly_one):\n \"\"\"Parse JSON response body.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Yandex, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.lang = lang\n domain = 'geocode-maps.yandex.ru'\n self.api = '%s://%s%s' % (self.scheme, domain, self.api_path)\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'geocode': self.format_string % query, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if exactly_one:\n params['results'] = 1\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_1|>\n\n<|body_start_2|>\n if exactly_one is DEFAULT_SENTINEL:\n warnings.warn('%s.reverse: default value for `exactly_one` argument will become True in geopy 2.0. Specify `exactly_one=False` as the argument explicitly to get rid of this warning.' % type(self).__name__, DeprecationWarning, stacklevel=2)\n exactly_one = False\n try:\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n except ValueError:\n raise ValueError('Must be a coordinate pair or Point')\n params = {'geocode': point, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if kind:\n params['kind'] = kind\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_2|>\n\n<|body_start_3|>\n if doc.get('error'):\n raise GeocoderServiceError(doc['error']['message'])\n try:\n places = doc['response']['GeoObjectCollection']['featureMember']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n\n def parse_code(place):\n \"\"\"\n Parse each record.\n \"\"\"\n try:\n place = place['GeoObject']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n longitude, latitude = [float(_) for _ in place['Point']['pos'].split(' ')]\n name_elements = ['name', 'description']\n location = ', '.join([place[k] for k in name_elements if place.get(k)])\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n try:\n return parse_code(places[0])\n except IndexError:\n return None\n else:\n return [parse_code(place) for place in places]\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000140", "length_bytes": 7530, "license_type": "permissive", "methods": [{"docstring": ".. versionchanged:: 1.14.0 Default scheme has been changed from ``http`` to ``https``. :param str api_key: Yandex API key (not obligatory) https://tech.yandex.ru/maps/keys/get/ :param str lang: response locale, the following locales are supported: ``\"ru_RU\"`` (default), ``\"uk_UA\"``, ``\"be_BY\"``, ``\"en_US\"``, ``\"tr_TR\"``. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. .. versionadded:: 1.12.0 :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. .. versionadded:: 1.14.0 :param str format_string: See", "name": "__init__", "signature": "def __init__(self, api_key=None, lang=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, scheme=None, format_string=None, ssl_context=DEFAULT_SENTINEL)"}, {"docstring": "Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.", "name": "geocode", "signature": "def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL)"}, {"docstring": "Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond befo", "name": "reverse", "signature": "def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None)"}, {"docstring": "Parse JSON response body.", "name": "_parse_json", "signature": "def _parse_json(self, doc, exactly_one)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_019410", "prompt": "Implement the Python class `Yandex` described below.\n\nClass description:\nYandex geocoder. Documentation at: https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/ .. versionadded:: 1.5.0\n\nMethod signatures and docstrings:\n- def __init__(self, api_key=None, lang=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, scheme=None, format_string=None, ssl_context=DEFAULT_SENTINEL): .. versionchanged:: 1.14.0 Default scheme has been changed from ``http`` to ``https``. :param str api_key: Yandex API key (not obligatory) https://tech.yandex.ru/maps/keys/get/ :param str lang: response locale, the following locales are supported: ``\"ru_RU\"`` (default), ``\"uk_UA\"``, ``\"be_BY\"``, ``\"en_US\"``, ``\"tr_TR\"``. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. .. versionadded:: 1.12.0 :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. .. versionadded:: 1.14.0 :param str format_string: See\n- def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL): Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\n- def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None): Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond befo\n- def _parse_json(self, doc, exactly_one): Parse JSON response body.", "prompted_full_text": "Implement the Python class `Yandex` described below.\n\nClass description:\nYandex geocoder. Documentation at: https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/ .. versionadded:: 1.5.0\n\nMethod signatures and docstrings:\n- def __init__(self, api_key=None, lang=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, scheme=None, format_string=None, ssl_context=DEFAULT_SENTINEL): .. versionchanged:: 1.14.0 Default scheme has been changed from ``http`` to ``https``. :param str api_key: Yandex API key (not obligatory) https://tech.yandex.ru/maps/keys/get/ :param str lang: response locale, the following locales are supported: ``\"ru_RU\"`` (default), ``\"uk_UA\"``, ``\"be_BY\"``, ``\"en_US\"``, ``\"tr_TR\"``. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. .. versionadded:: 1.12.0 :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. .. versionadded:: 1.14.0 :param str format_string: See\n- def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL): Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\n- def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None): Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond befo\n- def _parse_json(self, doc, exactly_one): Parse JSON response body.\n\n<|skeleton|>\nclass Yandex:\n \"\"\"Yandex geocoder. Documentation at: https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/ .. versionadded:: 1.5.0\"\"\"\n\n def __init__(self, api_key=None, lang=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, scheme=None, format_string=None, ssl_context=DEFAULT_SENTINEL):\n \"\"\".. versionchanged:: 1.14.0 Default scheme has been changed from ``http`` to ``https``. :param str api_key: Yandex API key (not obligatory) https://tech.yandex.ru/maps/keys/get/ :param str lang: response locale, the following locales are supported: ``\"ru_RU\"`` (default), ``\"uk_UA\"``, ``\"be_BY\"``, ``\"en_US\"``, ``\"tr_TR\"``. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. .. versionadded:: 1.12.0 :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. .. versionadded:: 1.14.0 :param str format_string: See\"\"\"\n <|body_0|>\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n <|body_1|>\n\n def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond befo\"\"\"\n <|body_2|>\n\n def _parse_json(self, doc, exactly_one):\n \"\"\"Parse JSON response body.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Yandex, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.lang = lang\n domain = 'geocode-maps.yandex.ru'\n self.api = '%s://%s%s' % (self.scheme, domain, self.api_path)\n<|end_body_0|>\n\n<|body_start_1|>\n params = {'geocode': self.format_string % query, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if exactly_one:\n params['results'] = 1\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_1|>\n\n<|body_start_2|>\n if exactly_one is DEFAULT_SENTINEL:\n warnings.warn('%s.reverse: default value for `exactly_one` argument will become True in geopy 2.0. Specify `exactly_one=False` as the argument explicitly to get rid of this warning.' % type(self).__name__, DeprecationWarning, stacklevel=2)\n exactly_one = False\n try:\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n except ValueError:\n raise ValueError('Must be a coordinate pair or Point')\n params = {'geocode': point, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if kind:\n params['kind'] = kind\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n<|end_body_2|>\n\n<|body_start_3|>\n if doc.get('error'):\n raise GeocoderServiceError(doc['error']['message'])\n try:\n places = doc['response']['GeoObjectCollection']['featureMember']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n\n def parse_code(place):\n \"\"\"\n Parse each record.\n \"\"\"\n try:\n place = place['GeoObject']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n longitude, latitude = [float(_) for _ in place['Point']['pos'].split(' ')]\n name_elements = ['name', 'description']\n location = ', '.join([place[k] for k in name_elements if place.get(k)])\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n try:\n return parse_code(places[0])\n except IndexError:\n return None\n else:\n return [parse_code(place) for place in places]\n<|end_body_3|>\n", "revision_id": "56d7d60d25d33b000185251d7e4b4fa27679d27f", "skeleton": "<|skeleton|>\nclass Yandex:\n \"\"\"Yandex geocoder. Documentation at: https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/ .. versionadded:: 1.5.0\"\"\"\n\n def __init__(self, api_key=None, lang=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, scheme=None, format_string=None, ssl_context=DEFAULT_SENTINEL):\n \"\"\".. versionchanged:: 1.14.0 Default scheme has been changed from ``http`` to ``https``. :param str api_key: Yandex API key (not obligatory) https://tech.yandex.ru/maps/keys/get/ :param str lang: response locale, the following locales are supported: ``\"ru_RU\"`` (default), ``\"uk_UA\"``, ``\"be_BY\"``, ``\"en_US\"``, ``\"tr_TR\"``. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. .. versionadded:: 1.12.0 :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. .. versionadded:: 1.14.0 :param str format_string: See\"\"\"\n <|body_0|>\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n <|body_1|>\n\n def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond befo\"\"\"\n <|body_2|>\n\n def _parse_json(self, doc, exactly_one):\n \"\"\"Parse JSON response body.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Yandex:\n \"\"\"Yandex geocoder. Documentation at: https://tech.yandex.com/maps/doc/geocoder/desc/concepts/input_params-docpage/ .. versionadded:: 1.5.0\"\"\"\n\n def __init__(self, api_key=None, lang=None, timeout=DEFAULT_SENTINEL, proxies=DEFAULT_SENTINEL, user_agent=None, scheme=None, format_string=None, ssl_context=DEFAULT_SENTINEL):\n \"\"\".. versionchanged:: 1.14.0 Default scheme has been changed from ``http`` to ``https``. :param str api_key: Yandex API key (not obligatory) https://tech.yandex.ru/maps/keys/get/ :param str lang: response locale, the following locales are supported: ``\"ru_RU\"`` (default), ``\"uk_UA\"``, ``\"be_BY\"``, ``\"en_US\"``, ``\"tr_TR\"``. :param int timeout: See :attr:`geopy.geocoders.options.default_timeout`. :param dict proxies: See :attr:`geopy.geocoders.options.default_proxies`. :param str user_agent: See :attr:`geopy.geocoders.options.default_user_agent`. .. versionadded:: 1.12.0 :param str scheme: See :attr:`geopy.geocoders.options.default_scheme`. .. versionadded:: 1.14.0 :param str format_string: See\"\"\"\n super(Yandex, self).__init__(format_string=format_string, scheme=scheme, timeout=timeout, proxies=proxies, user_agent=user_agent, ssl_context=ssl_context)\n self.api_key = api_key\n self.lang = lang\n domain = 'geocode-maps.yandex.ru'\n self.api = '%s://%s%s' % (self.scheme, domain, self.api_path)\n\n def geocode(self, query, exactly_one=True, timeout=DEFAULT_SENTINEL):\n \"\"\"Return a location point by address. :param str query: The address or query you wish to geocode. :param bool exactly_one: Return one result or a list of results, if available. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``.\"\"\"\n params = {'geocode': self.format_string % query, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if exactly_one:\n params['results'] = 1\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.geocode: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n\n def reverse(self, query, exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, kind=None):\n \"\"\"Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``\"%(latitude)s, %(longitude)s\"``. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond befo\"\"\"\n if exactly_one is DEFAULT_SENTINEL:\n warnings.warn('%s.reverse: default value for `exactly_one` argument will become True in geopy 2.0. Specify `exactly_one=False` as the argument explicitly to get rid of this warning.' % type(self).__name__, DeprecationWarning, stacklevel=2)\n exactly_one = False\n try:\n point = self._coerce_point_to_string(query, '%(lon)s,%(lat)s')\n except ValueError:\n raise ValueError('Must be a coordinate pair or Point')\n params = {'geocode': point, 'format': 'json'}\n if self.api_key:\n params['apikey'] = self.api_key\n if self.lang:\n params['lang'] = self.lang\n if kind:\n params['kind'] = kind\n url = '?'.join((self.api, urlencode(params)))\n logger.debug('%s.reverse: %s', self.__class__.__name__, url)\n return self._parse_json(self._call_geocoder(url, timeout=timeout), exactly_one)\n\n def _parse_json(self, doc, exactly_one):\n \"\"\"Parse JSON response body.\"\"\"\n if doc.get('error'):\n raise GeocoderServiceError(doc['error']['message'])\n try:\n places = doc['response']['GeoObjectCollection']['featureMember']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n\n def parse_code(place):\n \"\"\"\n Parse each record.\n \"\"\"\n try:\n place = place['GeoObject']\n except KeyError:\n raise GeocoderParseError('Failed to parse server response')\n longitude, latitude = [float(_) for _ in place['Point']['pos'].split(' ')]\n name_elements = ['name', 'description']\n location = ', '.join([place[k] for k in name_elements if place.get(k)])\n return Location(location, (latitude, longitude), place)\n if exactly_one:\n try:\n return parse_code(places[0])\n except IndexError:\n return None\n else:\n return [parse_code(place) for place in places]\n", "source": "the_stack_v2_python_sparse", "source_path": "HelloWorldWebsite/env/lib/python3.7/site-packages/geopy/geocoders/yandex.py", "source_repo": "rmnattas/team-64", "split": "test", "star_events_count": 2} {"blob_id": "59aac3e324493544bd61a43420438d5a2a7cc4b2", "bodies": ["super().__init__()\nself._shuffle = shuffle\nall_data: tp.Dict = dict(cats=cats, conts=conts, y=y)\nall_input_data = {name: arr for name, arr in all_data.items() if arr is not None}\nlengths = DataSetColumnar.get_shared_lengths(*all_input_data.values())\nself._inds_train, self._inds_valid = train_test_split(np.arange(lengths), test_size=frac_valid)\nself._train_input_data = {name: arr[self._inds_train] for name, arr in all_input_data.items()}\nself._valid_input_data = {name: arr[self._inds_valid] for name, arr in all_input_data.items()}\ntrain_dataset = DataSetColumnar(**self._train_input_data)\nvalid_dataset = DataSetColumnar(**self._valid_input_data)\nself.train_loader = data.DataLoader(train_dataset, batch_size, shuffle)\nvalid_batch_size = valid_batch_size or batch_size\nself.valid_loader = data.DataLoader(valid_dataset, valid_batch_size, shuffle)", "cats = np.arange(60).reshape((10, 6))\ny = np.arange(10)\nloaders = DataLoadersColumnar(cats=cats, y=y, batch_size=3)\nfor batch in loaders.train_loader:\n print(f'\\n train batch is [[{batch}]]')\nfor batch in loaders.valid_loader:\n print(f'\\n valid batch is [[{batch}]]')\nloaders"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self._shuffle = shuffle\n all_data: tp.Dict = dict(cats=cats, conts=conts, y=y)\n all_input_data = {name: arr for name, arr in all_data.items() if arr is not None}\n lengths = DataSetColumnar.get_shared_lengths(*all_input_data.values())\n self._inds_train, self._inds_valid = train_test_split(np.arange(lengths), test_size=frac_valid)\n self._train_input_data = {name: arr[self._inds_train] for name, arr in all_input_data.items()}\n self._valid_input_data = {name: arr[self._inds_valid] for name, arr in all_input_data.items()}\n train_dataset = DataSetColumnar(**self._train_input_data)\n valid_dataset = DataSetColumnar(**self._valid_input_data)\n self.train_loader = data.DataLoader(train_dataset, batch_size, shuffle)\n valid_batch_size = valid_batch_size or batch_size\n self.valid_loader = data.DataLoader(valid_dataset, valid_batch_size, shuffle)\n<|end_body_0|>\n\n<|body_start_1|>\n cats = np.arange(60).reshape((10, 6))\n y = np.arange(10)\n loaders = DataLoadersColumnar(cats=cats, y=y, batch_size=3)\n for batch in loaders.train_loader:\n print(f'\\n train batch is [[{batch}]]')\n for batch in loaders.valid_loader:\n print(f'\\n valid batch is [[{batch}]]')\n loaders\n<|end_body_1|>\n", "class_docstring": "Class for managing train/valid data loaders for columnar data", "class_name": "DataLoadersColumnar", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataLoadersColumnar:\n \"\"\"Class for managing train/valid data loaders for columnar data\"\"\"\n\n def __init__(self, frac_valid=0.2, cats: np.ndarray=None, conts: np.ndarray=None, y: np.ndarray=None, batch_size=1, valid_batch_size: int=None, shuffle=True) -> None:\n \"\"\":param cats: :param conts: :param y: :param batch_size: :param valid_batch_size: If \"None\", batch_size is used for the validation set data loader as well :param shuffle:\"\"\"\n <|body_0|>\n\n def TEST_DataLoadersColumnar():\n \"\"\"TEST\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._shuffle = shuffle\n all_data: tp.Dict = dict(cats=cats, conts=conts, y=y)\n all_input_data = {name: arr for name, arr in all_data.items() if arr is not None}\n lengths = DataSetColumnar.get_shared_lengths(*all_input_data.values())\n self._inds_train, self._inds_valid = train_test_split(np.arange(lengths), test_size=frac_valid)\n self._train_input_data = {name: arr[self._inds_train] for name, arr in all_input_data.items()}\n self._valid_input_data = {name: arr[self._inds_valid] for name, arr in all_input_data.items()}\n train_dataset = DataSetColumnar(**self._train_input_data)\n valid_dataset = DataSetColumnar(**self._valid_input_data)\n self.train_loader = data.DataLoader(train_dataset, batch_size, shuffle)\n valid_batch_size = valid_batch_size or batch_size\n self.valid_loader = data.DataLoader(valid_dataset, valid_batch_size, shuffle)\n<|end_body_0|>\n\n<|body_start_1|>\n cats = np.arange(60).reshape((10, 6))\n y = np.arange(10)\n loaders = DataLoadersColumnar(cats=cats, y=y, batch_size=3)\n for batch in loaders.train_loader:\n print(f'\\n train batch is [[{batch}]]')\n for batch in loaders.valid_loader:\n print(f'\\n valid batch is [[{batch}]]')\n loaders\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000141", "length_bytes": 3460, "license_type": "permissive", "methods": [{"docstring": ":param cats: :param conts: :param y: :param batch_size: :param valid_batch_size: If \"None\", batch_size is used for the validation set data loader as well :param shuffle:", "name": "__init__", "signature": "def __init__(self, frac_valid=0.2, cats: np.ndarray=None, conts: np.ndarray=None, y: np.ndarray=None, batch_size=1, valid_batch_size: int=None, shuffle=True) -> None"}, {"docstring": "TEST", "name": "TEST_DataLoadersColumnar", "signature": "def TEST_DataLoadersColumnar()"}], "n_methods": 2, "prompt": "Implement the Python class `DataLoadersColumnar` described below.\n\nClass description:\nClass for managing train/valid data loaders for columnar data\n\nMethod signatures and docstrings:\n- def __init__(self, frac_valid=0.2, cats: np.ndarray=None, conts: np.ndarray=None, y: np.ndarray=None, batch_size=1, valid_batch_size: int=None, shuffle=True) -> None: :param cats: :param conts: :param y: :param batch_size: :param valid_batch_size: If \"None\", batch_size is used for the validation set data loader as well :param shuffle:\n- def TEST_DataLoadersColumnar(): TEST", "prompted_full_text": "Implement the Python class `DataLoadersColumnar` described below.\n\nClass description:\nClass for managing train/valid data loaders for columnar data\n\nMethod signatures and docstrings:\n- def __init__(self, frac_valid=0.2, cats: np.ndarray=None, conts: np.ndarray=None, y: np.ndarray=None, batch_size=1, valid_batch_size: int=None, shuffle=True) -> None: :param cats: :param conts: :param y: :param batch_size: :param valid_batch_size: If \"None\", batch_size is used for the validation set data loader as well :param shuffle:\n- def TEST_DataLoadersColumnar(): TEST\n\n<|skeleton|>\nclass DataLoadersColumnar:\n \"\"\"Class for managing train/valid data loaders for columnar data\"\"\"\n\n def __init__(self, frac_valid=0.2, cats: np.ndarray=None, conts: np.ndarray=None, y: np.ndarray=None, batch_size=1, valid_batch_size: int=None, shuffle=True) -> None:\n \"\"\":param cats: :param conts: :param y: :param batch_size: :param valid_batch_size: If \"None\", batch_size is used for the validation set data loader as well :param shuffle:\"\"\"\n <|body_0|>\n\n def TEST_DataLoadersColumnar():\n \"\"\"TEST\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._shuffle = shuffle\n all_data: tp.Dict = dict(cats=cats, conts=conts, y=y)\n all_input_data = {name: arr for name, arr in all_data.items() if arr is not None}\n lengths = DataSetColumnar.get_shared_lengths(*all_input_data.values())\n self._inds_train, self._inds_valid = train_test_split(np.arange(lengths), test_size=frac_valid)\n self._train_input_data = {name: arr[self._inds_train] for name, arr in all_input_data.items()}\n self._valid_input_data = {name: arr[self._inds_valid] for name, arr in all_input_data.items()}\n train_dataset = DataSetColumnar(**self._train_input_data)\n valid_dataset = DataSetColumnar(**self._valid_input_data)\n self.train_loader = data.DataLoader(train_dataset, batch_size, shuffle)\n valid_batch_size = valid_batch_size or batch_size\n self.valid_loader = data.DataLoader(valid_dataset, valid_batch_size, shuffle)\n<|end_body_0|>\n\n<|body_start_1|>\n cats = np.arange(60).reshape((10, 6))\n y = np.arange(10)\n loaders = DataLoadersColumnar(cats=cats, y=y, batch_size=3)\n for batch in loaders.train_loader:\n print(f'\\n train batch is [[{batch}]]')\n for batch in loaders.valid_loader:\n print(f'\\n valid batch is [[{batch}]]')\n loaders\n<|end_body_1|>\n", "revision_id": "b7b020dee97230479a741e8d067b473957faa546", "skeleton": "<|skeleton|>\nclass DataLoadersColumnar:\n \"\"\"Class for managing train/valid data loaders for columnar data\"\"\"\n\n def __init__(self, frac_valid=0.2, cats: np.ndarray=None, conts: np.ndarray=None, y: np.ndarray=None, batch_size=1, valid_batch_size: int=None, shuffle=True) -> None:\n \"\"\":param cats: :param conts: :param y: :param batch_size: :param valid_batch_size: If \"None\", batch_size is used for the validation set data loader as well :param shuffle:\"\"\"\n <|body_0|>\n\n def TEST_DataLoadersColumnar():\n \"\"\"TEST\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DataLoadersColumnar:\n \"\"\"Class for managing train/valid data loaders for columnar data\"\"\"\n\n def __init__(self, frac_valid=0.2, cats: np.ndarray=None, conts: np.ndarray=None, y: np.ndarray=None, batch_size=1, valid_batch_size: int=None, shuffle=True) -> None:\n \"\"\":param cats: :param conts: :param y: :param batch_size: :param valid_batch_size: If \"None\", batch_size is used for the validation set data loader as well :param shuffle:\"\"\"\n super().__init__()\n self._shuffle = shuffle\n all_data: tp.Dict = dict(cats=cats, conts=conts, y=y)\n all_input_data = {name: arr for name, arr in all_data.items() if arr is not None}\n lengths = DataSetColumnar.get_shared_lengths(*all_input_data.values())\n self._inds_train, self._inds_valid = train_test_split(np.arange(lengths), test_size=frac_valid)\n self._train_input_data = {name: arr[self._inds_train] for name, arr in all_input_data.items()}\n self._valid_input_data = {name: arr[self._inds_valid] for name, arr in all_input_data.items()}\n train_dataset = DataSetColumnar(**self._train_input_data)\n valid_dataset = DataSetColumnar(**self._valid_input_data)\n self.train_loader = data.DataLoader(train_dataset, batch_size, shuffle)\n valid_batch_size = valid_batch_size or batch_size\n self.valid_loader = data.DataLoader(valid_dataset, valid_batch_size, shuffle)\n\n def TEST_DataLoadersColumnar():\n \"\"\"TEST\"\"\"\n cats = np.arange(60).reshape((10, 6))\n y = np.arange(10)\n loaders = DataLoadersColumnar(cats=cats, y=y, batch_size=3)\n for batch in loaders.train_loader:\n print(f'\\n train batch is [[{batch}]]')\n for batch in loaders.valid_loader:\n print(f'\\n valid batch is [[{batch}]]')\n loaders\n", "source": "the_stack_v2_python_sparse", "source_path": "bk_py_torch_libs/pt_data_2_loaders.py", "source_repo": "wbkdef/CodeSamples", "split": "test", "star_events_count": 0} {"blob_id": "42bdc6d18d3394296c471f8f39f4cd62f052516a", "bodies": ["super(MultiHeadDenseLayer, self).__init__()\nself._output_units = output_units\nself._num_heads = num_heads\nself._use_bias = use_bias\nself._is_output_transform = is_output_transform\nself._activation = activation\nself._activation_fn = get_activation(activation)\nself._flatten_output_units = tf.nest.flatten(self._output_units)\nif is_output_transform:\n assert not tf.nest.is_nested(self._output_units)\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, self._output_units)))\nelse:\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, sum(self._flatten_output_units))), requires_grad=True)\nif self._use_bias:\n self._bias = torch.nn.Parameter(torch.zeros(sum(self._flatten_output_units)), requires_grad=True)", "if self._is_output_transform:\n return [input_shape[-1] * input_shape[-2], self._output_units]\nreturn [input_shape[-1], sum(self._flatten_output_units)]", "if self._is_output_transform:\n return [self._num_heads, -1, self._output_units]\nreturn [-1, sum(self._flatten_output_units)]", "kernel = torch.reshape(self._kernel, self.kernel_shape)\nif self._is_output_transform:\n output = torch.einsum('abcd,cde->abe', inputs, kernel)\nelse:\n output = torch.einsum('abc,cd->abd', inputs, kernel)\nif self._use_bias:\n output += self._bias\nif not self._is_output_transform:\n output = torch.split(output, self._flatten_output_units, dim=-1)\n output = tf.nest.map_structure(lambda x, num_units: torch.reshape(x, list(x.size())[:-1] + [self._num_heads, num_units // self._num_heads]), output, self._flatten_output_units, check_types=False)\noutput = tf.nest.flatten(output)\nif self._activation_fn is not None:\n output = tf.nest.map_structure(self._activation_fn, output, check_types=False)\nreturn tf.nest.pack_sequence_as(self._output_units, output)"], "bodies_text": "<|body_start_0|>\n super(MultiHeadDenseLayer, self).__init__()\n self._output_units = output_units\n self._num_heads = num_heads\n self._use_bias = use_bias\n self._is_output_transform = is_output_transform\n self._activation = activation\n self._activation_fn = get_activation(activation)\n self._flatten_output_units = tf.nest.flatten(self._output_units)\n if is_output_transform:\n assert not tf.nest.is_nested(self._output_units)\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, self._output_units)))\n else:\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, sum(self._flatten_output_units))), requires_grad=True)\n if self._use_bias:\n self._bias = torch.nn.Parameter(torch.zeros(sum(self._flatten_output_units)), requires_grad=True)\n<|end_body_0|>\n\n<|body_start_1|>\n if self._is_output_transform:\n return [input_shape[-1] * input_shape[-2], self._output_units]\n return [input_shape[-1], sum(self._flatten_output_units)]\n<|end_body_1|>\n\n<|body_start_2|>\n if self._is_output_transform:\n return [self._num_heads, -1, self._output_units]\n return [-1, sum(self._flatten_output_units)]\n<|end_body_2|>\n\n<|body_start_3|>\n kernel = torch.reshape(self._kernel, self.kernel_shape)\n if self._is_output_transform:\n output = torch.einsum('abcd,cde->abe', inputs, kernel)\n else:\n output = torch.einsum('abc,cd->abd', inputs, kernel)\n if self._use_bias:\n output += self._bias\n if not self._is_output_transform:\n output = torch.split(output, self._flatten_output_units, dim=-1)\n output = tf.nest.map_structure(lambda x, num_units: torch.reshape(x, list(x.size())[:-1] + [self._num_heads, num_units // self._num_heads]), output, self._flatten_output_units, check_types=False)\n output = tf.nest.flatten(output)\n if self._activation_fn is not None:\n output = tf.nest.map_structure(self._activation_fn, output, check_types=False)\n return tf.nest.pack_sequence_as(self._output_units, output)\n<|end_body_3|>\n", "class_docstring": "Auto splitting or combining heads for the linear transformation.", "class_name": "MultiHeadDenseLayer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultiHeadDenseLayer:\n \"\"\"Auto splitting or combining heads for the linear transformation.\"\"\"\n\n def __init__(self, input_size, output_units, num_heads, activation=None, use_bias=True, is_output_transform=False):\n \"\"\"Initializes MultiHeadDenseLayer. Args: input_size: The input dimension. output_units: A int scalar or int list, indicating the transformed output units. It must be a int scalar when `is_output_transform` is True. num_heads: The head num. activation: A string or a callable function for activation. use_bias: A boolean, whether to add bias tensor. is_output_transform: A boolean, whether to use this layer for the output transformation in multi head attention.\"\"\"\n <|body_0|>\n\n def compat_kernel_shape(self, input_shape):\n \"\"\"Compatible kernel for variable storage.\"\"\"\n <|body_1|>\n\n def kernel_shape(self):\n \"\"\"The kernel shape.\"\"\"\n <|body_2|>\n\n def forward(self, inputs):\n \"\"\"Implements ``call()`` for MultiHeadDenseLayer. Args: inputs: A float tensor of shape [batch_size, length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, length, num_heads, num_units_per_head]. Returns: The projected tensor with shape [batch_size, length, num_heads, num_units_per_head] per `self._output_units` when output_projection is False, otherwise [batch_size, length, output_units].\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiHeadDenseLayer, self).__init__()\n self._output_units = output_units\n self._num_heads = num_heads\n self._use_bias = use_bias\n self._is_output_transform = is_output_transform\n self._activation = activation\n self._activation_fn = get_activation(activation)\n self._flatten_output_units = tf.nest.flatten(self._output_units)\n if is_output_transform:\n assert not tf.nest.is_nested(self._output_units)\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, self._output_units)))\n else:\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, sum(self._flatten_output_units))), requires_grad=True)\n if self._use_bias:\n self._bias = torch.nn.Parameter(torch.zeros(sum(self._flatten_output_units)), requires_grad=True)\n<|end_body_0|>\n\n<|body_start_1|>\n if self._is_output_transform:\n return [input_shape[-1] * input_shape[-2], self._output_units]\n return [input_shape[-1], sum(self._flatten_output_units)]\n<|end_body_1|>\n\n<|body_start_2|>\n if self._is_output_transform:\n return [self._num_heads, -1, self._output_units]\n return [-1, sum(self._flatten_output_units)]\n<|end_body_2|>\n\n<|body_start_3|>\n kernel = torch.reshape(self._kernel, self.kernel_shape)\n if self._is_output_transform:\n output = torch.einsum('abcd,cde->abe', inputs, kernel)\n else:\n output = torch.einsum('abc,cd->abd', inputs, kernel)\n if self._use_bias:\n output += self._bias\n if not self._is_output_transform:\n output = torch.split(output, self._flatten_output_units, dim=-1)\n output = tf.nest.map_structure(lambda x, num_units: torch.reshape(x, list(x.size())[:-1] + [self._num_heads, num_units // self._num_heads]), output, self._flatten_output_units, check_types=False)\n output = tf.nest.flatten(output)\n if self._activation_fn is not None:\n output = tf.nest.map_structure(self._activation_fn, output, check_types=False)\n return tf.nest.pack_sequence_as(self._output_units, output)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000142", "length_bytes": 15012, "license_type": "permissive", "methods": [{"docstring": "Initializes MultiHeadDenseLayer. Args: input_size: The input dimension. output_units: A int scalar or int list, indicating the transformed output units. It must be a int scalar when `is_output_transform` is True. num_heads: The head num. activation: A string or a callable function for activation. use_bias: A boolean, whether to add bias tensor. is_output_transform: A boolean, whether to use this layer for the output transformation in multi head attention.", "name": "__init__", "signature": "def __init__(self, input_size, output_units, num_heads, activation=None, use_bias=True, is_output_transform=False)"}, {"docstring": "Compatible kernel for variable storage.", "name": "compat_kernel_shape", "signature": "def compat_kernel_shape(self, input_shape)"}, {"docstring": "The kernel shape.", "name": "kernel_shape", "signature": "def kernel_shape(self)"}, {"docstring": "Implements ``call()`` for MultiHeadDenseLayer. Args: inputs: A float tensor of shape [batch_size, length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, length, num_heads, num_units_per_head]. Returns: The projected tensor with shape [batch_size, length, num_heads, num_units_per_head] per `self._output_units` when output_projection is False, otherwise [batch_size, length, output_units].", "name": "forward", "signature": "def forward(self, inputs)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_048393", "prompt": "Implement the Python class `MultiHeadDenseLayer` described below.\n\nClass description:\nAuto splitting or combining heads for the linear transformation.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, output_units, num_heads, activation=None, use_bias=True, is_output_transform=False): Initializes MultiHeadDenseLayer. Args: input_size: The input dimension. output_units: A int scalar or int list, indicating the transformed output units. It must be a int scalar when `is_output_transform` is True. num_heads: The head num. activation: A string or a callable function for activation. use_bias: A boolean, whether to add bias tensor. is_output_transform: A boolean, whether to use this layer for the output transformation in multi head attention.\n- def compat_kernel_shape(self, input_shape): Compatible kernel for variable storage.\n- def kernel_shape(self): The kernel shape.\n- def forward(self, inputs): Implements ``call()`` for MultiHeadDenseLayer. Args: inputs: A float tensor of shape [batch_size, length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, length, num_heads, num_units_per_head]. Returns: The projected tensor with shape [batch_size, length, num_heads, num_units_per_head] per `self._output_units` when output_projection is False, otherwise [batch_size, length, output_units].", "prompted_full_text": "Implement the Python class `MultiHeadDenseLayer` described below.\n\nClass description:\nAuto splitting or combining heads for the linear transformation.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, output_units, num_heads, activation=None, use_bias=True, is_output_transform=False): Initializes MultiHeadDenseLayer. Args: input_size: The input dimension. output_units: A int scalar or int list, indicating the transformed output units. It must be a int scalar when `is_output_transform` is True. num_heads: The head num. activation: A string or a callable function for activation. use_bias: A boolean, whether to add bias tensor. is_output_transform: A boolean, whether to use this layer for the output transformation in multi head attention.\n- def compat_kernel_shape(self, input_shape): Compatible kernel for variable storage.\n- def kernel_shape(self): The kernel shape.\n- def forward(self, inputs): Implements ``call()`` for MultiHeadDenseLayer. Args: inputs: A float tensor of shape [batch_size, length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, length, num_heads, num_units_per_head]. Returns: The projected tensor with shape [batch_size, length, num_heads, num_units_per_head] per `self._output_units` when output_projection is False, otherwise [batch_size, length, output_units].\n\n<|skeleton|>\nclass MultiHeadDenseLayer:\n \"\"\"Auto splitting or combining heads for the linear transformation.\"\"\"\n\n def __init__(self, input_size, output_units, num_heads, activation=None, use_bias=True, is_output_transform=False):\n \"\"\"Initializes MultiHeadDenseLayer. Args: input_size: The input dimension. output_units: A int scalar or int list, indicating the transformed output units. It must be a int scalar when `is_output_transform` is True. num_heads: The head num. activation: A string or a callable function for activation. use_bias: A boolean, whether to add bias tensor. is_output_transform: A boolean, whether to use this layer for the output transformation in multi head attention.\"\"\"\n <|body_0|>\n\n def compat_kernel_shape(self, input_shape):\n \"\"\"Compatible kernel for variable storage.\"\"\"\n <|body_1|>\n\n def kernel_shape(self):\n \"\"\"The kernel shape.\"\"\"\n <|body_2|>\n\n def forward(self, inputs):\n \"\"\"Implements ``call()`` for MultiHeadDenseLayer. Args: inputs: A float tensor of shape [batch_size, length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, length, num_heads, num_units_per_head]. Returns: The projected tensor with shape [batch_size, length, num_heads, num_units_per_head] per `self._output_units` when output_projection is False, otherwise [batch_size, length, output_units].\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MultiHeadDenseLayer, self).__init__()\n self._output_units = output_units\n self._num_heads = num_heads\n self._use_bias = use_bias\n self._is_output_transform = is_output_transform\n self._activation = activation\n self._activation_fn = get_activation(activation)\n self._flatten_output_units = tf.nest.flatten(self._output_units)\n if is_output_transform:\n assert not tf.nest.is_nested(self._output_units)\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, self._output_units)))\n else:\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, sum(self._flatten_output_units))), requires_grad=True)\n if self._use_bias:\n self._bias = torch.nn.Parameter(torch.zeros(sum(self._flatten_output_units)), requires_grad=True)\n<|end_body_0|>\n\n<|body_start_1|>\n if self._is_output_transform:\n return [input_shape[-1] * input_shape[-2], self._output_units]\n return [input_shape[-1], sum(self._flatten_output_units)]\n<|end_body_1|>\n\n<|body_start_2|>\n if self._is_output_transform:\n return [self._num_heads, -1, self._output_units]\n return [-1, sum(self._flatten_output_units)]\n<|end_body_2|>\n\n<|body_start_3|>\n kernel = torch.reshape(self._kernel, self.kernel_shape)\n if self._is_output_transform:\n output = torch.einsum('abcd,cde->abe', inputs, kernel)\n else:\n output = torch.einsum('abc,cd->abd', inputs, kernel)\n if self._use_bias:\n output += self._bias\n if not self._is_output_transform:\n output = torch.split(output, self._flatten_output_units, dim=-1)\n output = tf.nest.map_structure(lambda x, num_units: torch.reshape(x, list(x.size())[:-1] + [self._num_heads, num_units // self._num_heads]), output, self._flatten_output_units, check_types=False)\n output = tf.nest.flatten(output)\n if self._activation_fn is not None:\n output = tf.nest.map_structure(self._activation_fn, output, check_types=False)\n return tf.nest.pack_sequence_as(self._output_units, output)\n<|end_body_3|>\n", "revision_id": "06613a99305f02312a0e64ee3c3c50e7b00dcf0e", "skeleton": "<|skeleton|>\nclass MultiHeadDenseLayer:\n \"\"\"Auto splitting or combining heads for the linear transformation.\"\"\"\n\n def __init__(self, input_size, output_units, num_heads, activation=None, use_bias=True, is_output_transform=False):\n \"\"\"Initializes MultiHeadDenseLayer. Args: input_size: The input dimension. output_units: A int scalar or int list, indicating the transformed output units. It must be a int scalar when `is_output_transform` is True. num_heads: The head num. activation: A string or a callable function for activation. use_bias: A boolean, whether to add bias tensor. is_output_transform: A boolean, whether to use this layer for the output transformation in multi head attention.\"\"\"\n <|body_0|>\n\n def compat_kernel_shape(self, input_shape):\n \"\"\"Compatible kernel for variable storage.\"\"\"\n <|body_1|>\n\n def kernel_shape(self):\n \"\"\"The kernel shape.\"\"\"\n <|body_2|>\n\n def forward(self, inputs):\n \"\"\"Implements ``call()`` for MultiHeadDenseLayer. Args: inputs: A float tensor of shape [batch_size, length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, length, num_heads, num_units_per_head]. Returns: The projected tensor with shape [batch_size, length, num_heads, num_units_per_head] per `self._output_units` when output_projection is False, otherwise [batch_size, length, output_units].\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MultiHeadDenseLayer:\n \"\"\"Auto splitting or combining heads for the linear transformation.\"\"\"\n\n def __init__(self, input_size, output_units, num_heads, activation=None, use_bias=True, is_output_transform=False):\n \"\"\"Initializes MultiHeadDenseLayer. Args: input_size: The input dimension. output_units: A int scalar or int list, indicating the transformed output units. It must be a int scalar when `is_output_transform` is True. num_heads: The head num. activation: A string or a callable function for activation. use_bias: A boolean, whether to add bias tensor. is_output_transform: A boolean, whether to use this layer for the output transformation in multi head attention.\"\"\"\n super(MultiHeadDenseLayer, self).__init__()\n self._output_units = output_units\n self._num_heads = num_heads\n self._use_bias = use_bias\n self._is_output_transform = is_output_transform\n self._activation = activation\n self._activation_fn = get_activation(activation)\n self._flatten_output_units = tf.nest.flatten(self._output_units)\n if is_output_transform:\n assert not tf.nest.is_nested(self._output_units)\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, self._output_units)))\n else:\n self._kernel = torch.nn.Parameter(torch.nn.init.xavier_normal_(torch.empty(input_size, sum(self._flatten_output_units))), requires_grad=True)\n if self._use_bias:\n self._bias = torch.nn.Parameter(torch.zeros(sum(self._flatten_output_units)), requires_grad=True)\n\n def compat_kernel_shape(self, input_shape):\n \"\"\"Compatible kernel for variable storage.\"\"\"\n if self._is_output_transform:\n return [input_shape[-1] * input_shape[-2], self._output_units]\n return [input_shape[-1], sum(self._flatten_output_units)]\n\n def kernel_shape(self):\n \"\"\"The kernel shape.\"\"\"\n if self._is_output_transform:\n return [self._num_heads, -1, self._output_units]\n return [-1, sum(self._flatten_output_units)]\n\n def forward(self, inputs):\n \"\"\"Implements ``call()`` for MultiHeadDenseLayer. Args: inputs: A float tensor of shape [batch_size, length, hidden_size] when output_projection is False, otherwise a float tensor of shape [batch_size, length, num_heads, num_units_per_head]. Returns: The projected tensor with shape [batch_size, length, num_heads, num_units_per_head] per `self._output_units` when output_projection is False, otherwise [batch_size, length, output_units].\"\"\"\n kernel = torch.reshape(self._kernel, self.kernel_shape)\n if self._is_output_transform:\n output = torch.einsum('abcd,cde->abe', inputs, kernel)\n else:\n output = torch.einsum('abc,cd->abd', inputs, kernel)\n if self._use_bias:\n output += self._bias\n if not self._is_output_transform:\n output = torch.split(output, self._flatten_output_units, dim=-1)\n output = tf.nest.map_structure(lambda x, num_units: torch.reshape(x, list(x.size())[:-1] + [self._num_heads, num_units // self._num_heads]), output, self._flatten_output_units, check_types=False)\n output = tf.nest.flatten(output)\n if self._activation_fn is not None:\n output = tf.nest.map_structure(self._activation_fn, output, check_types=False)\n return tf.nest.pack_sequence_as(self._output_units, output)\n", "source": "the_stack_v2_python_sparse", "source_path": "neurst/neurst_pt/layers/common_layers.py", "source_repo": "ohlionel/Prune-Tune", "split": "test", "star_events_count": 12} {"blob_id": "04b1f7599c3db51cc0d3c3ba64029b3df9a8438e", "bodies": ["n = [i[:] for i in matrix]\nfor i in range(len(matrix)):\n for j in range(len(matrix[0])):\n m = len(matrix[0]) - 1 - i\n print(i, j, m)\n n[j][m] = matrix[i][j]\nreturn n", "matrix.reverse()\nprint(matrix)\nfor i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\nprint(matrix)", "for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\nprint(matrix)\nfor i in matrix:\n i.reverse()\nprint(matrix)"], "bodies_text": "<|body_start_0|>\n n = [i[:] for i in matrix]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n m = len(matrix[0]) - 1 - i\n print(i, j, m)\n n[j][m] = matrix[i][j]\n return n\n<|end_body_0|>\n\n<|body_start_1|>\n matrix.reverse()\n print(matrix)\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n<|end_body_1|>\n\n<|body_start_2|>\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n for i in matrix:\n i.reverse()\n print(matrix)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def rotate2(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def rotate1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = [i[:] for i in matrix]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n m = len(matrix[0]) - 1 - i\n print(i, j, m)\n n[j][m] = matrix[i][j]\n return n\n<|end_body_0|>\n\n<|body_start_1|>\n matrix.reverse()\n print(matrix)\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n<|end_body_1|>\n\n<|body_start_2|>\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n for i in matrix:\n i.reverse()\n print(matrix)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000143", "length_bytes": 1405, "license_type": "no_license", "methods": [{"docstring": ":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "name": "rotate2", "signature": "def rotate2(self, matrix)"}, {"docstring": ":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "name": "rotate1", "signature": "def rotate1(self, matrix)"}, {"docstring": ":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "name": "rotate", "signature": "def rotate(self, matrix)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004722", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotate2(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def rotate1(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def rotate(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def rotate2(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def rotate1(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n- def rotate(self, matrix): :type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def rotate2(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def rotate1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = [i[:] for i in matrix]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n m = len(matrix[0]) - 1 - i\n print(i, j, m)\n n[j][m] = matrix[i][j]\n return n\n<|end_body_0|>\n\n<|body_start_1|>\n matrix.reverse()\n print(matrix)\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n<|end_body_1|>\n\n<|body_start_2|>\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n for i in matrix:\n i.reverse()\n print(matrix)\n<|end_body_2|>\n", "revision_id": "f234bd7b62cb7bc2150faa764bf05a9095e19192", "skeleton": "<|skeleton|>\nclass Solution:\n\n def rotate2(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_0|>\n\n def rotate1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_1|>\n\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def rotate2(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n n = [i[:] for i in matrix]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n m = len(matrix[0]) - 1 - i\n print(i, j, m)\n n[j][m] = matrix[i][j]\n return n\n\n def rotate1(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n matrix.reverse()\n print(matrix)\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n\n def rotate(self, matrix):\n \"\"\":type matrix: List[List[int]] :rtype: void Do not return anything, modify matrix in-place instead.\"\"\"\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n matrix[i][j], matrix[j][i] = (matrix[j][i], matrix[i][j])\n print(matrix)\n for i in matrix:\n i.reverse()\n print(matrix)\n", "source": "the_stack_v2_python_sparse", "source_path": "alg/rotate_image.py", "source_repo": "nyannko/leetcode-python", "split": "test", "star_events_count": 0} {"blob_id": "e3061201e1f5ca147839c1d508a14d9f2fb8156f", "bodies": ["super(EncoderDynamicFactorized, self).__init__()\nself.latent_size = latent_size\nself.hidden_size = hidden_size\nself.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)\nself.output_layer = tf.keras.layers.Dense(2 * latent_size)", "out = self.dense(inputs)\nout = self.output_layer(out)\nloc = out[..., :self.latent_size]\nscale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-05\nreturn tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)"], "bodies_text": "<|body_start_0|>\n super(EncoderDynamicFactorized, self).__init__()\n self.latent_size = latent_size\n self.hidden_size = hidden_size\n self.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)\n self.output_layer = tf.keras.layers.Dense(2 * latent_size)\n<|end_body_0|>\n\n<|body_start_1|>\n out = self.dense(inputs)\n out = self.output_layer(out)\n loc = out[..., :self.latent_size]\n scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-05\n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)\n<|end_body_1|>\n", "class_docstring": "Probabilistic encoder for the time-variant latent variable `z_t`. The conditional distribution `q(z_t | x_t)` is a multivariate normal distribution on `R^{latent_size}` at each timestep `t`, conditioned on an intermediate representation of `x_t` from the convolutional encoder. The parameters are computed by a one-hidden layer neural net. In this formulation, we posit that the dynamic latent variable `z_t` is independent of static latent variable `f`. Together with the EncoderStatic class, we can formulate the factorized approximate latent posterior `q` inference (\"encoder\") model as ```none q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t). ```", "class_name": "EncoderDynamicFactorized", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EncoderDynamicFactorized:\n \"\"\"Probabilistic encoder for the time-variant latent variable `z_t`. The conditional distribution `q(z_t | x_t)` is a multivariate normal distribution on `R^{latent_size}` at each timestep `t`, conditioned on an intermediate representation of `x_t` from the convolutional encoder. The parameters are computed by a one-hidden layer neural net. In this formulation, we posit that the dynamic latent variable `z_t` is independent of static latent variable `f`. Together with the EncoderStatic class, we can formulate the factorized approximate latent posterior `q` inference (\"encoder\") model as ```none q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t). ```\"\"\"\n\n def __init__(self, latent_size, hidden_size):\n \"\"\"Constructs a \"factorized\" encoder for `z_t`. Args: latent_size: An integer corresponding to the dimensionality of the distribution. hidden_size: Dimensionality of the affine function parameters.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. Returns: A batch of MultivariateNormalDiag distributions with event shape [latent_size], batch shape [..., batch_size, timesteps], and sample shape [sample_shape, ..., batch_size, timesteps, latent_size].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncoderDynamicFactorized, self).__init__()\n self.latent_size = latent_size\n self.hidden_size = hidden_size\n self.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)\n self.output_layer = tf.keras.layers.Dense(2 * latent_size)\n<|end_body_0|>\n\n<|body_start_1|>\n out = self.dense(inputs)\n out = self.output_layer(out)\n loc = out[..., :self.latent_size]\n scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-05\n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000144", "length_bytes": 40214, "license_type": "permissive", "methods": [{"docstring": "Constructs a \"factorized\" encoder for `z_t`. Args: latent_size: An integer corresponding to the dimensionality of the distribution. hidden_size: Dimensionality of the affine function parameters.", "name": "__init__", "signature": "def __init__(self, latent_size, hidden_size)"}, {"docstring": "Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. Returns: A batch of MultivariateNormalDiag distributions with event shape [latent_size], batch shape [..., batch_size, timesteps], and sample shape [sample_shape, ..., batch_size, timesteps, latent_size].", "name": "call", "signature": "def call(self, inputs)"}], "n_methods": 2, "prompt": "Implement the Python class `EncoderDynamicFactorized` described below.\n\nClass description:\nProbabilistic encoder for the time-variant latent variable `z_t`. The conditional distribution `q(z_t | x_t)` is a multivariate normal distribution on `R^{latent_size}` at each timestep `t`, conditioned on an intermediate representation of `x_t` from the convolutional encoder. The parameters are computed by a one-hidden layer neural net. In this formulation, we posit that the dynamic latent variable `z_t` is independent of static latent variable `f`. Together with the EncoderStatic class, we can formulate the factorized approximate latent posterior `q` inference (\"encoder\") model as ```none q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t). ```\n\nMethod signatures and docstrings:\n- def __init__(self, latent_size, hidden_size): Constructs a \"factorized\" encoder for `z_t`. Args: latent_size: An integer corresponding to the dimensionality of the distribution. hidden_size: Dimensionality of the affine function parameters.\n- def call(self, inputs): Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. Returns: A batch of MultivariateNormalDiag distributions with event shape [latent_size], batch shape [..., batch_size, timesteps], and sample shape [sample_shape, ..., batch_size, timesteps, latent_size].", "prompted_full_text": "Implement the Python class `EncoderDynamicFactorized` described below.\n\nClass description:\nProbabilistic encoder for the time-variant latent variable `z_t`. The conditional distribution `q(z_t | x_t)` is a multivariate normal distribution on `R^{latent_size}` at each timestep `t`, conditioned on an intermediate representation of `x_t` from the convolutional encoder. The parameters are computed by a one-hidden layer neural net. In this formulation, we posit that the dynamic latent variable `z_t` is independent of static latent variable `f`. Together with the EncoderStatic class, we can formulate the factorized approximate latent posterior `q` inference (\"encoder\") model as ```none q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t). ```\n\nMethod signatures and docstrings:\n- def __init__(self, latent_size, hidden_size): Constructs a \"factorized\" encoder for `z_t`. Args: latent_size: An integer corresponding to the dimensionality of the distribution. hidden_size: Dimensionality of the affine function parameters.\n- def call(self, inputs): Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. Returns: A batch of MultivariateNormalDiag distributions with event shape [latent_size], batch shape [..., batch_size, timesteps], and sample shape [sample_shape, ..., batch_size, timesteps, latent_size].\n\n<|skeleton|>\nclass EncoderDynamicFactorized:\n \"\"\"Probabilistic encoder for the time-variant latent variable `z_t`. The conditional distribution `q(z_t | x_t)` is a multivariate normal distribution on `R^{latent_size}` at each timestep `t`, conditioned on an intermediate representation of `x_t` from the convolutional encoder. The parameters are computed by a one-hidden layer neural net. In this formulation, we posit that the dynamic latent variable `z_t` is independent of static latent variable `f`. Together with the EncoderStatic class, we can formulate the factorized approximate latent posterior `q` inference (\"encoder\") model as ```none q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t). ```\"\"\"\n\n def __init__(self, latent_size, hidden_size):\n \"\"\"Constructs a \"factorized\" encoder for `z_t`. Args: latent_size: An integer corresponding to the dimensionality of the distribution. hidden_size: Dimensionality of the affine function parameters.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. Returns: A batch of MultivariateNormalDiag distributions with event shape [latent_size], batch shape [..., batch_size, timesteps], and sample shape [sample_shape, ..., batch_size, timesteps, latent_size].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncoderDynamicFactorized, self).__init__()\n self.latent_size = latent_size\n self.hidden_size = hidden_size\n self.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)\n self.output_layer = tf.keras.layers.Dense(2 * latent_size)\n<|end_body_0|>\n\n<|body_start_1|>\n out = self.dense(inputs)\n out = self.output_layer(out)\n loc = out[..., :self.latent_size]\n scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-05\n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)\n<|end_body_1|>\n", "revision_id": "8f727353954cea1199e5c59e2019f018a3da6601", "skeleton": "<|skeleton|>\nclass EncoderDynamicFactorized:\n \"\"\"Probabilistic encoder for the time-variant latent variable `z_t`. The conditional distribution `q(z_t | x_t)` is a multivariate normal distribution on `R^{latent_size}` at each timestep `t`, conditioned on an intermediate representation of `x_t` from the convolutional encoder. The parameters are computed by a one-hidden layer neural net. In this formulation, we posit that the dynamic latent variable `z_t` is independent of static latent variable `f`. Together with the EncoderStatic class, we can formulate the factorized approximate latent posterior `q` inference (\"encoder\") model as ```none q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t). ```\"\"\"\n\n def __init__(self, latent_size, hidden_size):\n \"\"\"Constructs a \"factorized\" encoder for `z_t`. Args: latent_size: An integer corresponding to the dimensionality of the distribution. hidden_size: Dimensionality of the affine function parameters.\"\"\"\n <|body_0|>\n\n def call(self, inputs):\n \"\"\"Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. Returns: A batch of MultivariateNormalDiag distributions with event shape [latent_size], batch shape [..., batch_size, timesteps], and sample shape [sample_shape, ..., batch_size, timesteps, latent_size].\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EncoderDynamicFactorized:\n \"\"\"Probabilistic encoder for the time-variant latent variable `z_t`. The conditional distribution `q(z_t | x_t)` is a multivariate normal distribution on `R^{latent_size}` at each timestep `t`, conditioned on an intermediate representation of `x_t` from the convolutional encoder. The parameters are computed by a one-hidden layer neural net. In this formulation, we posit that the dynamic latent variable `z_t` is independent of static latent variable `f`. Together with the EncoderStatic class, we can formulate the factorized approximate latent posterior `q` inference (\"encoder\") model as ```none q(z_{1:T}, f | x_{1:T}) = q(f | x_{1:T}) prod_{t=1}^T q(z_t | x_t). ```\"\"\"\n\n def __init__(self, latent_size, hidden_size):\n \"\"\"Constructs a \"factorized\" encoder for `z_t`. Args: latent_size: An integer corresponding to the dimensionality of the distribution. hidden_size: Dimensionality of the affine function parameters.\"\"\"\n super(EncoderDynamicFactorized, self).__init__()\n self.latent_size = latent_size\n self.hidden_size = hidden_size\n self.dense = tf.keras.layers.Dense(hidden_size, activation=tf.nn.leaky_relu)\n self.output_layer = tf.keras.layers.Dense(2 * latent_size)\n\n def call(self, inputs):\n \"\"\"Runs the model to generate a distribution `q(z_{1:T} | x_{1:T})`. Args: inputs: A batch of intermediate representations of image frames across all timesteps, of shape [..., batch_size, timesteps, hidden_size]. Returns: A batch of MultivariateNormalDiag distributions with event shape [latent_size], batch shape [..., batch_size, timesteps], and sample shape [sample_shape, ..., batch_size, timesteps, latent_size].\"\"\"\n out = self.dense(inputs)\n out = self.output_layer(out)\n loc = out[..., :self.latent_size]\n scale_diag = tf.nn.softplus(out[..., self.latent_size:]) + 1e-05\n return tfd.MultivariateNormalDiag(loc=loc, scale_diag=scale_diag)\n", "source": "the_stack_v2_python_sparse", "source_path": "indl/old_reference/disentangled_vae_sprites.py", "source_repo": "SachsLab/indl", "split": "test", "star_events_count": 0} {"blob_id": "a636ab10d3ae523135a311cff300cc44d98ccba7", "bodies": ["if not root:\n return 0\nif not root.left and (not root.right):\n return 1\nq = [root]\nstep = 1\nwhile q:\n s = len(q)\n for i in range(s):\n cur = q.pop(0)\n if not cur.left and (not cur.right):\n return step\n if cur.left:\n q.append(cur.left)\n if cur.right:\n q.append(cur.right)\n step += 1\nreturn step", "if not root:\n return 0\nleft_min = self.minDepth(root.left)\nright_min = self.minDepth(root.right)\nif not root.left and root.right:\n return right_min + 1\nif root.left and (not root.right):\n return left_min + 1\nreturn 1 + min(left_min, right_min)"], "bodies_text": "<|body_start_0|>\n if not root:\n return 0\n if not root.left and (not root.right):\n return 1\n q = [root]\n step = 1\n while q:\n s = len(q)\n for i in range(s):\n cur = q.pop(0)\n if not cur.left and (not cur.right):\n return step\n if cur.left:\n q.append(cur.left)\n if cur.right:\n q.append(cur.right)\n step += 1\n return step\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n left_min = self.minDepth(root.left)\n right_min = self.minDepth(root.right)\n if not root.left and root.right:\n return right_min + 1\n if root.left and (not root.right):\n return left_min + 1\n return 1 + min(left_min, right_min)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minDepth1(self, root):\n \"\"\":type root: TreeNode :rtype: int :迭代\"\"\"\n <|body_0|>\n\n def minDepth(self, root):\n \"\"\":type root: TreeNode :rtype: int :递归\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return 0\n if not root.left and (not root.right):\n return 1\n q = [root]\n step = 1\n while q:\n s = len(q)\n for i in range(s):\n cur = q.pop(0)\n if not cur.left and (not cur.right):\n return step\n if cur.left:\n q.append(cur.left)\n if cur.right:\n q.append(cur.right)\n step += 1\n return step\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n left_min = self.minDepth(root.left)\n right_min = self.minDepth(root.right)\n if not root.left and root.right:\n return right_min + 1\n if root.left and (not root.right):\n return left_min + 1\n return 1 + min(left_min, right_min)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000145", "length_bytes": 1888, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: int :迭代", "name": "minDepth1", "signature": "def minDepth1(self, root)"}, {"docstring": ":type root: TreeNode :rtype: int :递归", "name": "minDepth", "signature": "def minDepth(self, root)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_019536", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDepth1(self, root): :type root: TreeNode :rtype: int :迭代\n- def minDepth(self, root): :type root: TreeNode :rtype: int :递归", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDepth1(self, root): :type root: TreeNode :rtype: int :迭代\n- def minDepth(self, root): :type root: TreeNode :rtype: int :递归\n\n<|skeleton|>\nclass Solution:\n\n def minDepth1(self, root):\n \"\"\":type root: TreeNode :rtype: int :迭代\"\"\"\n <|body_0|>\n\n def minDepth(self, root):\n \"\"\":type root: TreeNode :rtype: int :递归\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return 0\n if not root.left and (not root.right):\n return 1\n q = [root]\n step = 1\n while q:\n s = len(q)\n for i in range(s):\n cur = q.pop(0)\n if not cur.left and (not cur.right):\n return step\n if cur.left:\n q.append(cur.left)\n if cur.right:\n q.append(cur.right)\n step += 1\n return step\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n left_min = self.minDepth(root.left)\n right_min = self.minDepth(root.right)\n if not root.left and root.right:\n return right_min + 1\n if root.left and (not root.right):\n return left_min + 1\n return 1 + min(left_min, right_min)\n<|end_body_1|>\n", "revision_id": "6e18c5d257840489cc3fb1079ae3804c743982a4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minDepth1(self, root):\n \"\"\":type root: TreeNode :rtype: int :迭代\"\"\"\n <|body_0|>\n\n def minDepth(self, root):\n \"\"\":type root: TreeNode :rtype: int :递归\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def minDepth1(self, root):\n \"\"\":type root: TreeNode :rtype: int :迭代\"\"\"\n if not root:\n return 0\n if not root.left and (not root.right):\n return 1\n q = [root]\n step = 1\n while q:\n s = len(q)\n for i in range(s):\n cur = q.pop(0)\n if not cur.left and (not cur.right):\n return step\n if cur.left:\n q.append(cur.left)\n if cur.right:\n q.append(cur.right)\n step += 1\n return step\n\n def minDepth(self, root):\n \"\"\":type root: TreeNode :rtype: int :递归\"\"\"\n if not root:\n return 0\n left_min = self.minDepth(root.left)\n right_min = self.minDepth(root.right)\n if not root.left and root.right:\n return right_min + 1\n if root.left and (not root.right):\n return left_min + 1\n return 1 + min(left_min, right_min)\n", "source": "the_stack_v2_python_sparse", "source_path": "111.二叉树的最小深度.py", "source_repo": "yangyuxiang1996/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "c3a569abc61fdf6861eb8aa14c4e93c191548e44", "bodies": ["if nums == []:\n return 0\nj = 0\ncount = 1\nfor i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n nums[j] = nums[i]\n count = 1\nreturn j + 1", "if nums == []:\n return 0\nj = 0\ncount = 1\nfor i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n if i != j:\n nums[j] = nums[i]\n count = 1\nreturn j + 1"], "bodies_text": "<|body_start_0|>\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n nums[j] = nums[i]\n count = 1\n return j + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n if i != j:\n nums[j] = nums[i]\n count = 1\n return j + 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def removeDuplicates(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def removeDuplicates2(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n nums[j] = nums[i]\n count = 1\n return j + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n if i != j:\n nums[j] = nums[i]\n count = 1\n return j + 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000146", "length_bytes": 2170, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: int", "name": "removeDuplicates", "signature": "def removeDuplicates(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: int", "name": "removeDuplicates2", "signature": "def removeDuplicates2(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009862", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeDuplicates(self, nums): :type nums: List[int] :rtype: int\n- def removeDuplicates2(self, nums): :type nums: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeDuplicates(self, nums): :type nums: List[int] :rtype: int\n- def removeDuplicates2(self, nums): :type nums: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def removeDuplicates(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def removeDuplicates2(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n nums[j] = nums[i]\n count = 1\n return j + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n if i != j:\n nums[j] = nums[i]\n count = 1\n return j + 1\n<|end_body_1|>\n", "revision_id": "c55b0cfd2967a2221c27ed738e8de15034775945", "skeleton": "<|skeleton|>\nclass Solution:\n\n def removeDuplicates(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def removeDuplicates2(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def removeDuplicates(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n nums[j] = nums[i]\n count = 1\n return j + 1\n\n def removeDuplicates2(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n if nums == []:\n return 0\n j = 0\n count = 1\n for i in range(1, len(nums)):\n if nums[i] == nums[j] and count < 2:\n j += 1\n nums[j] = nums[i]\n count += 1\n elif nums[i] != nums[j]:\n j += 1\n if i != j:\n nums[j] = nums[i]\n count = 1\n return j + 1\n", "source": "the_stack_v2_python_sparse", "source_path": "PycharmProjects/leetcode/UsingArray/MoveElements/removeDuplicates80.py", "source_repo": "crystal30/DataStructure", "split": "test", "star_events_count": 0} {"blob_id": "10a98b46ffbfd3b2994e83955fc9f6f30760b981", "bodies": ["with db.auto_commit():\n admin = Admin()\n admin.id = id\n admin.account = account\n admin.password = generate_password_hash(password)\n admin.auth = 1\n db.session.add(admin)", "admin = Admin.query.filter_by(account=account).first_or_404()\nif admin.auth == 1:\n society = Society.query.get_or_404(admin.id, description=u'该管理员账号对应的社团不存在')\n name = society.name\nelse:\n name = '超级管理员'\nif not check_password_hash(admin.password, pwd):\n raise AuthFailed('密码错误')\nreturn {'society_id': admin.id, 'scope': admin.auth, 'name': name}", "admin = Admin.query.filter_by(account=account).first_or_404()\nif not check_password_hash(admin.password, old_pwd):\n raise AuthFailed(msg=u'旧密码错误')\nadmin.password = generate_password_hash(new_pwd)\nadmin.update_time = admin.generate_datetime\ndb.session.commit()"], "bodies_text": "<|body_start_0|>\n with db.auto_commit():\n admin = Admin()\n admin.id = id\n admin.account = account\n admin.password = generate_password_hash(password)\n admin.auth = 1\n db.session.add(admin)\n<|end_body_0|>\n\n<|body_start_1|>\n admin = Admin.query.filter_by(account=account).first_or_404()\n if admin.auth == 1:\n society = Society.query.get_or_404(admin.id, description=u'该管理员账号对应的社团不存在')\n name = society.name\n else:\n name = '超级管理员'\n if not check_password_hash(admin.password, pwd):\n raise AuthFailed('密码错误')\n return {'society_id': admin.id, 'scope': admin.auth, 'name': name}\n<|end_body_1|>\n\n<|body_start_2|>\n admin = Admin.query.filter_by(account=account).first_or_404()\n if not check_password_hash(admin.password, old_pwd):\n raise AuthFailed(msg=u'旧密码错误')\n admin.password = generate_password_hash(new_pwd)\n admin.update_time = admin.generate_datetime\n db.session.commit()\n<|end_body_2|>\n", "class_docstring": "定义管理员模型", "class_name": "Admin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Admin:\n \"\"\"定义管理员模型\"\"\"\n\n def register(id, account, password):\n \"\"\"社团管理员注册\"\"\"\n <|body_0|>\n\n def login_verify(account, pwd):\n \"\"\"管理员登录验证\"\"\"\n <|body_1|>\n\n def change_password(account, old_pwd, new_pwd):\n \"\"\"修改密码\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with db.auto_commit():\n admin = Admin()\n admin.id = id\n admin.account = account\n admin.password = generate_password_hash(password)\n admin.auth = 1\n db.session.add(admin)\n<|end_body_0|>\n\n<|body_start_1|>\n admin = Admin.query.filter_by(account=account).first_or_404()\n if admin.auth == 1:\n society = Society.query.get_or_404(admin.id, description=u'该管理员账号对应的社团不存在')\n name = society.name\n else:\n name = '超级管理员'\n if not check_password_hash(admin.password, pwd):\n raise AuthFailed('密码错误')\n return {'society_id': admin.id, 'scope': admin.auth, 'name': name}\n<|end_body_1|>\n\n<|body_start_2|>\n admin = Admin.query.filter_by(account=account).first_or_404()\n if not check_password_hash(admin.password, old_pwd):\n raise AuthFailed(msg=u'旧密码错误')\n admin.password = generate_password_hash(new_pwd)\n admin.update_time = admin.generate_datetime\n db.session.commit()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000147", "length_bytes": 2074, "license_type": "permissive", "methods": [{"docstring": "社团管理员注册", "name": "register", "signature": "def register(id, account, password)"}, {"docstring": "管理员登录验证", "name": "login_verify", "signature": "def login_verify(account, pwd)"}, {"docstring": "修改密码", "name": "change_password", "signature": "def change_password(account, old_pwd, new_pwd)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_014663", "prompt": "Implement the Python class `Admin` described below.\n\nClass description:\n定义管理员模型\n\nMethod signatures and docstrings:\n- def register(id, account, password): 社团管理员注册\n- def login_verify(account, pwd): 管理员登录验证\n- def change_password(account, old_pwd, new_pwd): 修改密码", "prompted_full_text": "Implement the Python class `Admin` described below.\n\nClass description:\n定义管理员模型\n\nMethod signatures and docstrings:\n- def register(id, account, password): 社团管理员注册\n- def login_verify(account, pwd): 管理员登录验证\n- def change_password(account, old_pwd, new_pwd): 修改密码\n\n<|skeleton|>\nclass Admin:\n \"\"\"定义管理员模型\"\"\"\n\n def register(id, account, password):\n \"\"\"社团管理员注册\"\"\"\n <|body_0|>\n\n def login_verify(account, pwd):\n \"\"\"管理员登录验证\"\"\"\n <|body_1|>\n\n def change_password(account, old_pwd, new_pwd):\n \"\"\"修改密码\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n with db.auto_commit():\n admin = Admin()\n admin.id = id\n admin.account = account\n admin.password = generate_password_hash(password)\n admin.auth = 1\n db.session.add(admin)\n<|end_body_0|>\n\n<|body_start_1|>\n admin = Admin.query.filter_by(account=account).first_or_404()\n if admin.auth == 1:\n society = Society.query.get_or_404(admin.id, description=u'该管理员账号对应的社团不存在')\n name = society.name\n else:\n name = '超级管理员'\n if not check_password_hash(admin.password, pwd):\n raise AuthFailed('密码错误')\n return {'society_id': admin.id, 'scope': admin.auth, 'name': name}\n<|end_body_1|>\n\n<|body_start_2|>\n admin = Admin.query.filter_by(account=account).first_or_404()\n if not check_password_hash(admin.password, old_pwd):\n raise AuthFailed(msg=u'旧密码错误')\n admin.password = generate_password_hash(new_pwd)\n admin.update_time = admin.generate_datetime\n db.session.commit()\n<|end_body_2|>\n", "revision_id": "4640123bd8a660d50afa929e1897b92e2d21e796", "skeleton": "<|skeleton|>\nclass Admin:\n \"\"\"定义管理员模型\"\"\"\n\n def register(id, account, password):\n \"\"\"社团管理员注册\"\"\"\n <|body_0|>\n\n def login_verify(account, pwd):\n \"\"\"管理员登录验证\"\"\"\n <|body_1|>\n\n def change_password(account, old_pwd, new_pwd):\n \"\"\"修改密码\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Admin:\n \"\"\"定义管理员模型\"\"\"\n\n def register(id, account, password):\n \"\"\"社团管理员注册\"\"\"\n with db.auto_commit():\n admin = Admin()\n admin.id = id\n admin.account = account\n admin.password = generate_password_hash(password)\n admin.auth = 1\n db.session.add(admin)\n\n def login_verify(account, pwd):\n \"\"\"管理员登录验证\"\"\"\n admin = Admin.query.filter_by(account=account).first_or_404()\n if admin.auth == 1:\n society = Society.query.get_or_404(admin.id, description=u'该管理员账号对应的社团不存在')\n name = society.name\n else:\n name = '超级管理员'\n if not check_password_hash(admin.password, pwd):\n raise AuthFailed('密码错误')\n return {'society_id': admin.id, 'scope': admin.auth, 'name': name}\n\n def change_password(account, old_pwd, new_pwd):\n \"\"\"修改密码\"\"\"\n admin = Admin.query.filter_by(account=account).first_or_404()\n if not check_password_hash(admin.password, old_pwd):\n raise AuthFailed(msg=u'旧密码错误')\n admin.password = generate_password_hash(new_pwd)\n admin.update_time = admin.generate_datetime\n db.session.commit()\n", "source": "the_stack_v2_python_sparse", "source_path": "models/admin.py", "source_repo": "simple55-alt/apply-api", "split": "test", "star_events_count": 0} {"blob_id": "13fcb12fc88d63f5e3cacf3ab7117941c1e04caf", "bodies": ["self._geo_shape_ids = query.pop('geo_shape_ids', None)\nself._geom_search_class = geom_search_class\nfields = query.get('fields')\nif fields and self._geom_search_class:\n self._fetch_geoms = N.GEOM in fields and N.ID in fields\nelse:\n self._fetch_geoms = False\nsuper().__init__(index, query)", "super()._read_query(**kwargs)\nif ids:\n self._search = self._search.filter(_build_terms_query(N.ID, ids))\nif name:\n self._search = self._search.query(_build_name_query(N.NAME, name, exact))\nif geo_shape_geoms:\n self._search = self._search.query(_build_geo_query(N.GEOM, geoms=geo_shape_geoms))\nif census_locality:\n self._search = self._search.query(_build_subentity_query(N.CENSUS_LOCALITY_ID, N.CENSUS_LOCALITY_NAME, census_locality, exact))\nif municipality:\n self._search = self._search.query(_build_subentity_query(N.MUN_ID, N.MUN_NAME, municipality, exact))\nif department:\n self._search = self._search.query(_build_subentity_query(N.DEPT_ID, N.DEPT_NAME, department, exact))\nif state:\n self._search = self._search.query(_build_subentity_query(N.STATE_ID, N.STATE_NAME, state, exact))\nif order:\n if order == N.NAME:\n order = N.EXACT_SUFFIX.format(order)\n self._search = self._search.sort(order)", "if self._geo_shape_ids:\n yield from self._expand_intersection_query(self._geo_shape_ids)\nresponse = (yield self._search)\nself._result = ElasticsearchResult(response, self._offset)\nif self._fetch_geoms:\n yield from self._expand_geometry_query(self._geom_search_class)"], "bodies_text": "<|body_start_0|>\n self._geo_shape_ids = query.pop('geo_shape_ids', None)\n self._geom_search_class = geom_search_class\n fields = query.get('fields')\n if fields and self._geom_search_class:\n self._fetch_geoms = N.GEOM in fields and N.ID in fields\n else:\n self._fetch_geoms = False\n super().__init__(index, query)\n<|end_body_0|>\n\n<|body_start_1|>\n super()._read_query(**kwargs)\n if ids:\n self._search = self._search.filter(_build_terms_query(N.ID, ids))\n if name:\n self._search = self._search.query(_build_name_query(N.NAME, name, exact))\n if geo_shape_geoms:\n self._search = self._search.query(_build_geo_query(N.GEOM, geoms=geo_shape_geoms))\n if census_locality:\n self._search = self._search.query(_build_subentity_query(N.CENSUS_LOCALITY_ID, N.CENSUS_LOCALITY_NAME, census_locality, exact))\n if municipality:\n self._search = self._search.query(_build_subentity_query(N.MUN_ID, N.MUN_NAME, municipality, exact))\n if department:\n self._search = self._search.query(_build_subentity_query(N.DEPT_ID, N.DEPT_NAME, department, exact))\n if state:\n self._search = self._search.query(_build_subentity_query(N.STATE_ID, N.STATE_NAME, state, exact))\n if order:\n if order == N.NAME:\n order = N.EXACT_SUFFIX.format(order)\n self._search = self._search.sort(order)\n<|end_body_1|>\n\n<|body_start_2|>\n if self._geo_shape_ids:\n yield from self._expand_intersection_query(self._geo_shape_ids)\n response = (yield self._search)\n self._result = ElasticsearchResult(response, self._offset)\n if self._fetch_geoms:\n yield from self._expand_geometry_query(self._geom_search_class)\n<|end_body_2|>\n", "class_docstring": "Representa una búsqueda de entidades territoriales (provincias, departamentos, etc.). Attributes: _geo_shape_ids (dict): Diccionario de str - list, las keys siendo tipos de entidades, y los valores siendo listas de IDs para el tipo de entidad. Se separa este atributo de los parámetros de búsqueda ya que requiere un manejo especial (requiere realizar consultas adicionales a otros índices). _geom_search_class (type): Clase que debería utilizarse para buscar geometrías para entidades de este TerritoriesSearch. Si es 'None', las geometrías simplemente pueden ser obtenidas agregando 'geometria' a la lista de campos. _fetch_geoms (bool): Verdadero si es necesario realizar consultas adicionales par", "class_name": "TerritoriesSearch", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TerritoriesSearch:\n \"\"\"Representa una búsqueda de entidades territoriales (provincias, departamentos, etc.). Attributes: _geo_shape_ids (dict): Diccionario de str - list, las keys siendo tipos de entidades, y los valores siendo listas de IDs para el tipo de entidad. Se separa este atributo de los parámetros de búsqueda ya que requiere un manejo especial (requiere realizar consultas adicionales a otros índices). _geom_search_class (type): Clase que debería utilizarse para buscar geometrías para entidades de este TerritoriesSearch. Si es 'None', las geometrías simplemente pueden ser obtenidas agregando 'geometria' a la lista de campos. _fetch_geoms (bool): Verdadero si es necesario realizar consultas adicionales par\"\"\"\n\n def __init__(self, index, query, geom_search_class=None):\n \"\"\"Inicializa un objeto de tipo TerritoriesSearch. Args: index (str): Ver atributo '_index'. query (dict): Parámetros de la búsqueda. Ver el método '_read_query' para tomar nota de los valores permitidos dentro del diccionario. geom_search_class (type): Ver atributo '_geom_search_class'.\"\"\"\n <|body_0|>\n\n def _read_query(self, ids=None, name=None, census_locality=None, municipality=None, department=None, state=None, exact=False, geo_shape_geoms=None, order=None, **kwargs):\n \"\"\"Lee los parámetros de búsqueda recibidos y los agrega al atributo 'self._search'. Luego, invoca al método '_read_query' de la superclase con los parámetros que no fueron procesados. Args: ids (list): Filtrar por IDs de entidades. name (str): Filtrar por nombre de entidades. census_locality (list, str): Filtrar por nombre o IDs de localidades censales. municipality (list, str): Filtrar por nombre o IDs de municipios. department (list, str): Filtrar por nombre o IDs de departamentos. state (list, str): Filtrar por nombre o IDs de provincias. exact (bool): Si es verdadero, desactivar la búsqueda fuzzy para todos los parámetros de texto siendo utilizados (nombre, provincia, etc.). geo_shape_geom\"\"\"\n <|body_1|>\n\n def search_steps(self):\n \"\"\"Ver documentación de 'ElasticsearchSearch.search_steps'. Pasos requeridos: 1) Expandir parámetros 'geo_shape_ids'. (opcional) 2) Buscar la entidad principal. 3) Obtener geometrías. (opcional)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._geo_shape_ids = query.pop('geo_shape_ids', None)\n self._geom_search_class = geom_search_class\n fields = query.get('fields')\n if fields and self._geom_search_class:\n self._fetch_geoms = N.GEOM in fields and N.ID in fields\n else:\n self._fetch_geoms = False\n super().__init__(index, query)\n<|end_body_0|>\n\n<|body_start_1|>\n super()._read_query(**kwargs)\n if ids:\n self._search = self._search.filter(_build_terms_query(N.ID, ids))\n if name:\n self._search = self._search.query(_build_name_query(N.NAME, name, exact))\n if geo_shape_geoms:\n self._search = self._search.query(_build_geo_query(N.GEOM, geoms=geo_shape_geoms))\n if census_locality:\n self._search = self._search.query(_build_subentity_query(N.CENSUS_LOCALITY_ID, N.CENSUS_LOCALITY_NAME, census_locality, exact))\n if municipality:\n self._search = self._search.query(_build_subentity_query(N.MUN_ID, N.MUN_NAME, municipality, exact))\n if department:\n self._search = self._search.query(_build_subentity_query(N.DEPT_ID, N.DEPT_NAME, department, exact))\n if state:\n self._search = self._search.query(_build_subentity_query(N.STATE_ID, N.STATE_NAME, state, exact))\n if order:\n if order == N.NAME:\n order = N.EXACT_SUFFIX.format(order)\n self._search = self._search.sort(order)\n<|end_body_1|>\n\n<|body_start_2|>\n if self._geo_shape_ids:\n yield from self._expand_intersection_query(self._geo_shape_ids)\n response = (yield self._search)\n self._result = ElasticsearchResult(response, self._offset)\n if self._fetch_geoms:\n yield from self._expand_geometry_query(self._geom_search_class)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000148", "length_bytes": 42920, "license_type": "permissive", "methods": [{"docstring": "Inicializa un objeto de tipo TerritoriesSearch. Args: index (str): Ver atributo '_index'. query (dict): Parámetros de la búsqueda. Ver el método '_read_query' para tomar nota de los valores permitidos dentro del diccionario. geom_search_class (type): Ver atributo '_geom_search_class'.", "name": "__init__", "signature": "def __init__(self, index, query, geom_search_class=None)"}, {"docstring": "Lee los parámetros de búsqueda recibidos y los agrega al atributo 'self._search'. Luego, invoca al método '_read_query' de la superclase con los parámetros que no fueron procesados. Args: ids (list): Filtrar por IDs de entidades. name (str): Filtrar por nombre de entidades. census_locality (list, str): Filtrar por nombre o IDs de localidades censales. municipality (list, str): Filtrar por nombre o IDs de municipios. department (list, str): Filtrar por nombre o IDs de departamentos. state (list, str): Filtrar por nombre o IDs de provincias. exact (bool): Si es verdadero, desactivar la búsqueda fuzzy para todos los parámetros de texto siendo utilizados (nombre, provincia, etc.). geo_shape_geom", "name": "_read_query", "signature": "def _read_query(self, ids=None, name=None, census_locality=None, municipality=None, department=None, state=None, exact=False, geo_shape_geoms=None, order=None, **kwargs)"}, {"docstring": "Ver documentación de 'ElasticsearchSearch.search_steps'. Pasos requeridos: 1) Expandir parámetros 'geo_shape_ids'. (opcional) 2) Buscar la entidad principal. 3) Obtener geometrías. (opcional)", "name": "search_steps", "signature": "def search_steps(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001267", "prompt": "Implement the Python class `TerritoriesSearch` described below.\n\nClass description:\nRepresenta una búsqueda de entidades territoriales (provincias, departamentos, etc.). Attributes: _geo_shape_ids (dict): Diccionario de str - list, las keys siendo tipos de entidades, y los valores siendo listas de IDs para el tipo de entidad. Se separa este atributo de los parámetros de búsqueda ya que requiere un manejo especial (requiere realizar consultas adicionales a otros índices). _geom_search_class (type): Clase que debería utilizarse para buscar geometrías para entidades de este TerritoriesSearch. Si es 'None', las geometrías simplemente pueden ser obtenidas agregando 'geometria' a la lista de campos. _fetch_geoms (bool): Verdadero si es necesario realizar consultas adicionales par\n\nMethod signatures and docstrings:\n- def __init__(self, index, query, geom_search_class=None): Inicializa un objeto de tipo TerritoriesSearch. Args: index (str): Ver atributo '_index'. query (dict): Parámetros de la búsqueda. Ver el método '_read_query' para tomar nota de los valores permitidos dentro del diccionario. geom_search_class (type): Ver atributo '_geom_search_class'.\n- def _read_query(self, ids=None, name=None, census_locality=None, municipality=None, department=None, state=None, exact=False, geo_shape_geoms=None, order=None, **kwargs): Lee los parámetros de búsqueda recibidos y los agrega al atributo 'self._search'. Luego, invoca al método '_read_query' de la superclase con los parámetros que no fueron procesados. Args: ids (list): Filtrar por IDs de entidades. name (str): Filtrar por nombre de entidades. census_locality (list, str): Filtrar por nombre o IDs de localidades censales. municipality (list, str): Filtrar por nombre o IDs de municipios. department (list, str): Filtrar por nombre o IDs de departamentos. state (list, str): Filtrar por nombre o IDs de provincias. exact (bool): Si es verdadero, desactivar la búsqueda fuzzy para todos los parámetros de texto siendo utilizados (nombre, provincia, etc.). geo_shape_geom\n- def search_steps(self): Ver documentación de 'ElasticsearchSearch.search_steps'. Pasos requeridos: 1) Expandir parámetros 'geo_shape_ids'. (opcional) 2) Buscar la entidad principal. 3) Obtener geometrías. (opcional)", "prompted_full_text": "Implement the Python class `TerritoriesSearch` described below.\n\nClass description:\nRepresenta una búsqueda de entidades territoriales (provincias, departamentos, etc.). Attributes: _geo_shape_ids (dict): Diccionario de str - list, las keys siendo tipos de entidades, y los valores siendo listas de IDs para el tipo de entidad. Se separa este atributo de los parámetros de búsqueda ya que requiere un manejo especial (requiere realizar consultas adicionales a otros índices). _geom_search_class (type): Clase que debería utilizarse para buscar geometrías para entidades de este TerritoriesSearch. Si es 'None', las geometrías simplemente pueden ser obtenidas agregando 'geometria' a la lista de campos. _fetch_geoms (bool): Verdadero si es necesario realizar consultas adicionales par\n\nMethod signatures and docstrings:\n- def __init__(self, index, query, geom_search_class=None): Inicializa un objeto de tipo TerritoriesSearch. Args: index (str): Ver atributo '_index'. query (dict): Parámetros de la búsqueda. Ver el método '_read_query' para tomar nota de los valores permitidos dentro del diccionario. geom_search_class (type): Ver atributo '_geom_search_class'.\n- def _read_query(self, ids=None, name=None, census_locality=None, municipality=None, department=None, state=None, exact=False, geo_shape_geoms=None, order=None, **kwargs): Lee los parámetros de búsqueda recibidos y los agrega al atributo 'self._search'. Luego, invoca al método '_read_query' de la superclase con los parámetros que no fueron procesados. Args: ids (list): Filtrar por IDs de entidades. name (str): Filtrar por nombre de entidades. census_locality (list, str): Filtrar por nombre o IDs de localidades censales. municipality (list, str): Filtrar por nombre o IDs de municipios. department (list, str): Filtrar por nombre o IDs de departamentos. state (list, str): Filtrar por nombre o IDs de provincias. exact (bool): Si es verdadero, desactivar la búsqueda fuzzy para todos los parámetros de texto siendo utilizados (nombre, provincia, etc.). geo_shape_geom\n- def search_steps(self): Ver documentación de 'ElasticsearchSearch.search_steps'. Pasos requeridos: 1) Expandir parámetros 'geo_shape_ids'. (opcional) 2) Buscar la entidad principal. 3) Obtener geometrías. (opcional)\n\n<|skeleton|>\nclass TerritoriesSearch:\n \"\"\"Representa una búsqueda de entidades territoriales (provincias, departamentos, etc.). Attributes: _geo_shape_ids (dict): Diccionario de str - list, las keys siendo tipos de entidades, y los valores siendo listas de IDs para el tipo de entidad. Se separa este atributo de los parámetros de búsqueda ya que requiere un manejo especial (requiere realizar consultas adicionales a otros índices). _geom_search_class (type): Clase que debería utilizarse para buscar geometrías para entidades de este TerritoriesSearch. Si es 'None', las geometrías simplemente pueden ser obtenidas agregando 'geometria' a la lista de campos. _fetch_geoms (bool): Verdadero si es necesario realizar consultas adicionales par\"\"\"\n\n def __init__(self, index, query, geom_search_class=None):\n \"\"\"Inicializa un objeto de tipo TerritoriesSearch. Args: index (str): Ver atributo '_index'. query (dict): Parámetros de la búsqueda. Ver el método '_read_query' para tomar nota de los valores permitidos dentro del diccionario. geom_search_class (type): Ver atributo '_geom_search_class'.\"\"\"\n <|body_0|>\n\n def _read_query(self, ids=None, name=None, census_locality=None, municipality=None, department=None, state=None, exact=False, geo_shape_geoms=None, order=None, **kwargs):\n \"\"\"Lee los parámetros de búsqueda recibidos y los agrega al atributo 'self._search'. Luego, invoca al método '_read_query' de la superclase con los parámetros que no fueron procesados. Args: ids (list): Filtrar por IDs de entidades. name (str): Filtrar por nombre de entidades. census_locality (list, str): Filtrar por nombre o IDs de localidades censales. municipality (list, str): Filtrar por nombre o IDs de municipios. department (list, str): Filtrar por nombre o IDs de departamentos. state (list, str): Filtrar por nombre o IDs de provincias. exact (bool): Si es verdadero, desactivar la búsqueda fuzzy para todos los parámetros de texto siendo utilizados (nombre, provincia, etc.). geo_shape_geom\"\"\"\n <|body_1|>\n\n def search_steps(self):\n \"\"\"Ver documentación de 'ElasticsearchSearch.search_steps'. Pasos requeridos: 1) Expandir parámetros 'geo_shape_ids'. (opcional) 2) Buscar la entidad principal. 3) Obtener geometrías. (opcional)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._geo_shape_ids = query.pop('geo_shape_ids', None)\n self._geom_search_class = geom_search_class\n fields = query.get('fields')\n if fields and self._geom_search_class:\n self._fetch_geoms = N.GEOM in fields and N.ID in fields\n else:\n self._fetch_geoms = False\n super().__init__(index, query)\n<|end_body_0|>\n\n<|body_start_1|>\n super()._read_query(**kwargs)\n if ids:\n self._search = self._search.filter(_build_terms_query(N.ID, ids))\n if name:\n self._search = self._search.query(_build_name_query(N.NAME, name, exact))\n if geo_shape_geoms:\n self._search = self._search.query(_build_geo_query(N.GEOM, geoms=geo_shape_geoms))\n if census_locality:\n self._search = self._search.query(_build_subentity_query(N.CENSUS_LOCALITY_ID, N.CENSUS_LOCALITY_NAME, census_locality, exact))\n if municipality:\n self._search = self._search.query(_build_subentity_query(N.MUN_ID, N.MUN_NAME, municipality, exact))\n if department:\n self._search = self._search.query(_build_subentity_query(N.DEPT_ID, N.DEPT_NAME, department, exact))\n if state:\n self._search = self._search.query(_build_subentity_query(N.STATE_ID, N.STATE_NAME, state, exact))\n if order:\n if order == N.NAME:\n order = N.EXACT_SUFFIX.format(order)\n self._search = self._search.sort(order)\n<|end_body_1|>\n\n<|body_start_2|>\n if self._geo_shape_ids:\n yield from self._expand_intersection_query(self._geo_shape_ids)\n response = (yield self._search)\n self._result = ElasticsearchResult(response, self._offset)\n if self._fetch_geoms:\n yield from self._expand_geometry_query(self._geom_search_class)\n<|end_body_2|>\n", "revision_id": "cf044cdf4f22f77cd8d120dd0c6c377dd434d91d", "skeleton": "<|skeleton|>\nclass TerritoriesSearch:\n \"\"\"Representa una búsqueda de entidades territoriales (provincias, departamentos, etc.). Attributes: _geo_shape_ids (dict): Diccionario de str - list, las keys siendo tipos de entidades, y los valores siendo listas de IDs para el tipo de entidad. Se separa este atributo de los parámetros de búsqueda ya que requiere un manejo especial (requiere realizar consultas adicionales a otros índices). _geom_search_class (type): Clase que debería utilizarse para buscar geometrías para entidades de este TerritoriesSearch. Si es 'None', las geometrías simplemente pueden ser obtenidas agregando 'geometria' a la lista de campos. _fetch_geoms (bool): Verdadero si es necesario realizar consultas adicionales par\"\"\"\n\n def __init__(self, index, query, geom_search_class=None):\n \"\"\"Inicializa un objeto de tipo TerritoriesSearch. Args: index (str): Ver atributo '_index'. query (dict): Parámetros de la búsqueda. Ver el método '_read_query' para tomar nota de los valores permitidos dentro del diccionario. geom_search_class (type): Ver atributo '_geom_search_class'.\"\"\"\n <|body_0|>\n\n def _read_query(self, ids=None, name=None, census_locality=None, municipality=None, department=None, state=None, exact=False, geo_shape_geoms=None, order=None, **kwargs):\n \"\"\"Lee los parámetros de búsqueda recibidos y los agrega al atributo 'self._search'. Luego, invoca al método '_read_query' de la superclase con los parámetros que no fueron procesados. Args: ids (list): Filtrar por IDs de entidades. name (str): Filtrar por nombre de entidades. census_locality (list, str): Filtrar por nombre o IDs de localidades censales. municipality (list, str): Filtrar por nombre o IDs de municipios. department (list, str): Filtrar por nombre o IDs de departamentos. state (list, str): Filtrar por nombre o IDs de provincias. exact (bool): Si es verdadero, desactivar la búsqueda fuzzy para todos los parámetros de texto siendo utilizados (nombre, provincia, etc.). geo_shape_geom\"\"\"\n <|body_1|>\n\n def search_steps(self):\n \"\"\"Ver documentación de 'ElasticsearchSearch.search_steps'. Pasos requeridos: 1) Expandir parámetros 'geo_shape_ids'. (opcional) 2) Buscar la entidad principal. 3) Obtener geometrías. (opcional)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TerritoriesSearch:\n \"\"\"Representa una búsqueda de entidades territoriales (provincias, departamentos, etc.). Attributes: _geo_shape_ids (dict): Diccionario de str - list, las keys siendo tipos de entidades, y los valores siendo listas de IDs para el tipo de entidad. Se separa este atributo de los parámetros de búsqueda ya que requiere un manejo especial (requiere realizar consultas adicionales a otros índices). _geom_search_class (type): Clase que debería utilizarse para buscar geometrías para entidades de este TerritoriesSearch. Si es 'None', las geometrías simplemente pueden ser obtenidas agregando 'geometria' a la lista de campos. _fetch_geoms (bool): Verdadero si es necesario realizar consultas adicionales par\"\"\"\n\n def __init__(self, index, query, geom_search_class=None):\n \"\"\"Inicializa un objeto de tipo TerritoriesSearch. Args: index (str): Ver atributo '_index'. query (dict): Parámetros de la búsqueda. Ver el método '_read_query' para tomar nota de los valores permitidos dentro del diccionario. geom_search_class (type): Ver atributo '_geom_search_class'.\"\"\"\n self._geo_shape_ids = query.pop('geo_shape_ids', None)\n self._geom_search_class = geom_search_class\n fields = query.get('fields')\n if fields and self._geom_search_class:\n self._fetch_geoms = N.GEOM in fields and N.ID in fields\n else:\n self._fetch_geoms = False\n super().__init__(index, query)\n\n def _read_query(self, ids=None, name=None, census_locality=None, municipality=None, department=None, state=None, exact=False, geo_shape_geoms=None, order=None, **kwargs):\n \"\"\"Lee los parámetros de búsqueda recibidos y los agrega al atributo 'self._search'. Luego, invoca al método '_read_query' de la superclase con los parámetros que no fueron procesados. Args: ids (list): Filtrar por IDs de entidades. name (str): Filtrar por nombre de entidades. census_locality (list, str): Filtrar por nombre o IDs de localidades censales. municipality (list, str): Filtrar por nombre o IDs de municipios. department (list, str): Filtrar por nombre o IDs de departamentos. state (list, str): Filtrar por nombre o IDs de provincias. exact (bool): Si es verdadero, desactivar la búsqueda fuzzy para todos los parámetros de texto siendo utilizados (nombre, provincia, etc.). geo_shape_geom\"\"\"\n super()._read_query(**kwargs)\n if ids:\n self._search = self._search.filter(_build_terms_query(N.ID, ids))\n if name:\n self._search = self._search.query(_build_name_query(N.NAME, name, exact))\n if geo_shape_geoms:\n self._search = self._search.query(_build_geo_query(N.GEOM, geoms=geo_shape_geoms))\n if census_locality:\n self._search = self._search.query(_build_subentity_query(N.CENSUS_LOCALITY_ID, N.CENSUS_LOCALITY_NAME, census_locality, exact))\n if municipality:\n self._search = self._search.query(_build_subentity_query(N.MUN_ID, N.MUN_NAME, municipality, exact))\n if department:\n self._search = self._search.query(_build_subentity_query(N.DEPT_ID, N.DEPT_NAME, department, exact))\n if state:\n self._search = self._search.query(_build_subentity_query(N.STATE_ID, N.STATE_NAME, state, exact))\n if order:\n if order == N.NAME:\n order = N.EXACT_SUFFIX.format(order)\n self._search = self._search.sort(order)\n\n def search_steps(self):\n \"\"\"Ver documentación de 'ElasticsearchSearch.search_steps'. Pasos requeridos: 1) Expandir parámetros 'geo_shape_ids'. (opcional) 2) Buscar la entidad principal. 3) Obtener geometrías. (opcional)\"\"\"\n if self._geo_shape_ids:\n yield from self._expand_intersection_query(self._geo_shape_ids)\n response = (yield self._search)\n self._result = ElasticsearchResult(response, self._offset)\n if self._fetch_geoms:\n yield from self._expand_geometry_query(self._geom_search_class)\n", "source": "the_stack_v2_python_sparse", "source_path": "service/data.py", "source_repo": "datosgobar/georef-ar-api", "split": "test", "star_events_count": 122} {"blob_id": "665943cb6cad28ece2080e4cd084fb9b436ba927", "bodies": ["super().__init__()\nif lambda_ < 0:\n raise ValueError('Parameter[lambda_] must be non-negative.')\nself._lambda_ = lambda_\nif not isinstance(B, sp.csr_matrix):\n raise ValueError('Parameter[B] must be (N_px, N_px) CSR.')\nself._B = B\nif not isinstance(p, Parameter):\n raise ValueError('Parameter[p]: expected deepwave.nn.crnn.Parameter')\nself._p = p", "_, _, tau = self._p.decode(p)\nN_px = tau.size\nz = 0.5 * self._lambda_ / N_px * (tau @ self._B.dot(tau))\nreturn z", "_, _, tau = self._p.decode(p)\nN_px = tau.size\ndtau = self._lambda_ / N_px * self._B.T.dot(tau)\nz = self._p.encode(tau=dtau)\nreturn z"], "bodies_text": "<|body_start_0|>\n super().__init__()\n if lambda_ < 0:\n raise ValueError('Parameter[lambda_] must be non-negative.')\n self._lambda_ = lambda_\n if not isinstance(B, sp.csr_matrix):\n raise ValueError('Parameter[B] must be (N_px, N_px) CSR.')\n self._B = B\n if not isinstance(p, Parameter):\n raise ValueError('Parameter[p]: expected deepwave.nn.crnn.Parameter')\n self._p = p\n<|end_body_0|>\n\n<|body_start_1|>\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n z = 0.5 * self._lambda_ / N_px * (tau @ self._B.dot(tau))\n return z\n<|end_body_1|>\n\n<|body_start_2|>\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n dtau = self._lambda_ / N_px * self._B.T.dot(tau)\n z = self._p.encode(tau=dtau)\n return z\n<|end_body_2|>\n", "class_docstring": "Proxy object to evaluate loss function. f(p) = \\\\frac{\\\\lambda_{2}}{2 N_{px}}\\\\norm{\\\\bbB^{1/2} \\\\bbtau(p)}{2}^{2}", "class_name": "LaplacianLossFunction", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LaplacianLossFunction:\n \"\"\"Proxy object to evaluate loss function. f(p) = \\\\frac{\\\\lambda_{2}}{2 N_{px}}\\\\norm{\\\\bbB^{1/2} \\\\bbtau(p)}{2}^{2}\"\"\"\n\n def __init__(self, B, lambda_, p):\n \"\"\"Parameters ---------- B : :py:class:`~scipy.sparse.csr_matrix` (N_px, N_px) Graph Laplacian. lambda_ : float Regularization parameter >= 0. p : :py:class:`~deepwave.nn.crnn.Parameter` Serializer to encode/decode parameters.\"\"\"\n <|body_0|>\n\n def eval(self, p, x):\n \"\"\"Evaluate f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : float z = f(p)\"\"\"\n <|body_1|>\n\n def grad(self, p, x):\n \"\"\"Evaluate \\\\grad_{p} f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter gradient, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. z = \\\\grad_{p}{f(p)}\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if lambda_ < 0:\n raise ValueError('Parameter[lambda_] must be non-negative.')\n self._lambda_ = lambda_\n if not isinstance(B, sp.csr_matrix):\n raise ValueError('Parameter[B] must be (N_px, N_px) CSR.')\n self._B = B\n if not isinstance(p, Parameter):\n raise ValueError('Parameter[p]: expected deepwave.nn.crnn.Parameter')\n self._p = p\n<|end_body_0|>\n\n<|body_start_1|>\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n z = 0.5 * self._lambda_ / N_px * (tau @ self._B.dot(tau))\n return z\n<|end_body_1|>\n\n<|body_start_2|>\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n dtau = self._lambda_ / N_px * self._B.T.dot(tau)\n z = self._p.encode(tau=dtau)\n return z\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000149", "length_bytes": 22688, "license_type": "no_license", "methods": [{"docstring": "Parameters ---------- B : :py:class:`~scipy.sparse.csr_matrix` (N_px, N_px) Graph Laplacian. lambda_ : float Regularization parameter >= 0. p : :py:class:`~deepwave.nn.crnn.Parameter` Serializer to encode/decode parameters.", "name": "__init__", "signature": "def __init__(self, B, lambda_, p)"}, {"docstring": "Evaluate f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : float z = f(p)", "name": "eval", "signature": "def eval(self, p, x)"}, {"docstring": "Evaluate \\\\grad_{p} f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter gradient, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. z = \\\\grad_{p}{f(p)}", "name": "grad", "signature": "def grad(self, p, x)"}], "n_methods": 3, "prompt": "Implement the Python class `LaplacianLossFunction` described below.\n\nClass description:\nProxy object to evaluate loss function. f(p) = \\\\frac{\\\\lambda_{2}}{2 N_{px}}\\\\norm{\\\\bbB^{1/2} \\\\bbtau(p)}{2}^{2}\n\nMethod signatures and docstrings:\n- def __init__(self, B, lambda_, p): Parameters ---------- B : :py:class:`~scipy.sparse.csr_matrix` (N_px, N_px) Graph Laplacian. lambda_ : float Regularization parameter >= 0. p : :py:class:`~deepwave.nn.crnn.Parameter` Serializer to encode/decode parameters.\n- def eval(self, p, x): Evaluate f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : float z = f(p)\n- def grad(self, p, x): Evaluate \\\\grad_{p} f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter gradient, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. z = \\\\grad_{p}{f(p)}", "prompted_full_text": "Implement the Python class `LaplacianLossFunction` described below.\n\nClass description:\nProxy object to evaluate loss function. f(p) = \\\\frac{\\\\lambda_{2}}{2 N_{px}}\\\\norm{\\\\bbB^{1/2} \\\\bbtau(p)}{2}^{2}\n\nMethod signatures and docstrings:\n- def __init__(self, B, lambda_, p): Parameters ---------- B : :py:class:`~scipy.sparse.csr_matrix` (N_px, N_px) Graph Laplacian. lambda_ : float Regularization parameter >= 0. p : :py:class:`~deepwave.nn.crnn.Parameter` Serializer to encode/decode parameters.\n- def eval(self, p, x): Evaluate f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : float z = f(p)\n- def grad(self, p, x): Evaluate \\\\grad_{p} f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter gradient, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. z = \\\\grad_{p}{f(p)}\n\n<|skeleton|>\nclass LaplacianLossFunction:\n \"\"\"Proxy object to evaluate loss function. f(p) = \\\\frac{\\\\lambda_{2}}{2 N_{px}}\\\\norm{\\\\bbB^{1/2} \\\\bbtau(p)}{2}^{2}\"\"\"\n\n def __init__(self, B, lambda_, p):\n \"\"\"Parameters ---------- B : :py:class:`~scipy.sparse.csr_matrix` (N_px, N_px) Graph Laplacian. lambda_ : float Regularization parameter >= 0. p : :py:class:`~deepwave.nn.crnn.Parameter` Serializer to encode/decode parameters.\"\"\"\n <|body_0|>\n\n def eval(self, p, x):\n \"\"\"Evaluate f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : float z = f(p)\"\"\"\n <|body_1|>\n\n def grad(self, p, x):\n \"\"\"Evaluate \\\\grad_{p} f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter gradient, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. z = \\\\grad_{p}{f(p)}\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if lambda_ < 0:\n raise ValueError('Parameter[lambda_] must be non-negative.')\n self._lambda_ = lambda_\n if not isinstance(B, sp.csr_matrix):\n raise ValueError('Parameter[B] must be (N_px, N_px) CSR.')\n self._B = B\n if not isinstance(p, Parameter):\n raise ValueError('Parameter[p]: expected deepwave.nn.crnn.Parameter')\n self._p = p\n<|end_body_0|>\n\n<|body_start_1|>\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n z = 0.5 * self._lambda_ / N_px * (tau @ self._B.dot(tau))\n return z\n<|end_body_1|>\n\n<|body_start_2|>\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n dtau = self._lambda_ / N_px * self._B.T.dot(tau)\n z = self._p.encode(tau=dtau)\n return z\n<|end_body_2|>\n", "revision_id": "6bb46f330d9745d4a4871d412cd4f4730bbbcfdd", "skeleton": "<|skeleton|>\nclass LaplacianLossFunction:\n \"\"\"Proxy object to evaluate loss function. f(p) = \\\\frac{\\\\lambda_{2}}{2 N_{px}}\\\\norm{\\\\bbB^{1/2} \\\\bbtau(p)}{2}^{2}\"\"\"\n\n def __init__(self, B, lambda_, p):\n \"\"\"Parameters ---------- B : :py:class:`~scipy.sparse.csr_matrix` (N_px, N_px) Graph Laplacian. lambda_ : float Regularization parameter >= 0. p : :py:class:`~deepwave.nn.crnn.Parameter` Serializer to encode/decode parameters.\"\"\"\n <|body_0|>\n\n def eval(self, p, x):\n \"\"\"Evaluate f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : float z = f(p)\"\"\"\n <|body_1|>\n\n def grad(self, p, x):\n \"\"\"Evaluate \\\\grad_{p} f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter gradient, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. z = \\\\grad_{p}{f(p)}\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LaplacianLossFunction:\n \"\"\"Proxy object to evaluate loss function. f(p) = \\\\frac{\\\\lambda_{2}}{2 N_{px}}\\\\norm{\\\\bbB^{1/2} \\\\bbtau(p)}{2}^{2}\"\"\"\n\n def __init__(self, B, lambda_, p):\n \"\"\"Parameters ---------- B : :py:class:`~scipy.sparse.csr_matrix` (N_px, N_px) Graph Laplacian. lambda_ : float Regularization parameter >= 0. p : :py:class:`~deepwave.nn.crnn.Parameter` Serializer to encode/decode parameters.\"\"\"\n super().__init__()\n if lambda_ < 0:\n raise ValueError('Parameter[lambda_] must be non-negative.')\n self._lambda_ = lambda_\n if not isinstance(B, sp.csr_matrix):\n raise ValueError('Parameter[B] must be (N_px, N_px) CSR.')\n self._B = B\n if not isinstance(p, Parameter):\n raise ValueError('Parameter[p]: expected deepwave.nn.crnn.Parameter')\n self._p = p\n\n def eval(self, p, x):\n \"\"\"Evaluate f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : float z = f(p)\"\"\"\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n z = 0.5 * self._lambda_ / N_px * (tau @ self._B.dot(tau))\n return z\n\n def grad(self, p, x):\n \"\"\"Evaluate \\\\grad_{p} f(p, x). Parameters ---------- p : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter encoding, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. x : :py:class:`~numpy.ndarray` ([N_sample,], N_cell_2) vectorized sample encoding, output of :py:meth:`~deepwave.nn.Sampler.encode`. Several samples can be provided if stacked along axis 0. This variable is not used in the implementation, but is provided for interface consistency. Returns ------- z : :py:class:`~numpy.ndarray` (N_cell_1,) vectorized parameter gradient, output of :py:meth:`~deepwave.nn.crnn.Parameter.encode`. z = \\\\grad_{p}{f(p)}\"\"\"\n _, _, tau = self._p.decode(p)\n N_px = tau.size\n dtau = self._lambda_ / N_px * self._B.T.dot(tau)\n z = self._p.encode(tau=dtau)\n return z\n", "source": "the_stack_v2_python_sparse", "source_path": "deepwave/nn/crnn.py", "source_repo": "imagingofthings/DeepWave", "split": "test", "star_events_count": 6} {"blob_id": "f89fbcf1de238dca6961797c28895316b636e362", "bodies": ["serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\nserv2 = Service_Type.objects.create(name='morethings', pretty_name='More Things', description='stuff')\nloc = Region.objects.create(name='place')\nloc.set_new_service(serv1, start_rate=0.5)\nloc.set_new_service(serv2, start_rate=0.5)\nself.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)", "serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\nloc = Region.objects.create(name='place')\nloc.set_new_service(serv1, start_rate=0.5)\nself.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\nrate = loc.get_service_by_type_name('things').get_current_rate()\nself.assertEquals(rate.rate, 0.5)", "loc = Region.objects.create(name='place')\nwith self.assertRaises(TypeError):\n loc.set_new_service(service_type=5, start_rate=0.5)\nwith self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate='not a number')\nwith self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate=-5)", "serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\nserv2 = Service_Type.objects.create(name='things2', pretty_name='things 2', description='stuff')\nloc = Region.objects.create(name='place')\nloc.set_new_service(serv1, start_rate=0.5)\nloc.set_new_service(serv2, start_rate=0.5)\nself.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\nself.assertEquals(loc.get_service_by_type_name('things2').service_type, serv2)", "loc = Region.objects.create(name='place')\nwith self.assertRaises(TypeError):\n loc.get_service_by_type_name(name=2)\nwith self.assertRaises(TypeError):\n loc.get_service_by_type_name(name='s p a c e ! ! ! !')"], "bodies_text": "<|body_start_0|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='morethings', pretty_name='More Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n<|end_body_0|>\n\n<|body_start_1|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n rate = loc.get_service_by_type_name('things').get_current_rate()\n self.assertEquals(rate.rate, 0.5)\n<|end_body_1|>\n\n<|body_start_2|>\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.set_new_service(service_type=5, start_rate=0.5)\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate='not a number')\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate=-5)\n<|end_body_2|>\n\n<|body_start_3|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='things2', pretty_name='things 2', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n self.assertEquals(loc.get_service_by_type_name('things2').service_type, serv2)\n<|end_body_3|>\n\n<|body_start_4|>\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name=2)\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name='s p a c e ! ! ! !')\n<|end_body_4|>\n", "class_docstring": "", "class_name": "RegionFunctionTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RegionFunctionTests:\n\n def test_set_new_service_1(self):\n \"\"\"This checks to make sure that you can set new services.\"\"\"\n <|body_0|>\n\n def test_set_new_service_2(self):\n \"\"\"Checks to make sure that the service added and the rate given, to it are the same as the ones returned.\"\"\"\n <|body_1|>\n\n def test_set_new_service_fail(self):\n \"\"\"Checks that the function fail if given parameters of the wrong types.\"\"\"\n <|body_2|>\n\n def test_get_service_by_type_name(self):\n \"\"\"Checks that the services being added are the same as the ones being returned.\"\"\"\n <|body_3|>\n\n def test_get_service_by_type_name_fail(self):\n \"\"\"Checks that the function fails if given parameters of the wrong types.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='morethings', pretty_name='More Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n<|end_body_0|>\n\n<|body_start_1|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n rate = loc.get_service_by_type_name('things').get_current_rate()\n self.assertEquals(rate.rate, 0.5)\n<|end_body_1|>\n\n<|body_start_2|>\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.set_new_service(service_type=5, start_rate=0.5)\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate='not a number')\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate=-5)\n<|end_body_2|>\n\n<|body_start_3|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='things2', pretty_name='things 2', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n self.assertEquals(loc.get_service_by_type_name('things2').service_type, serv2)\n<|end_body_3|>\n\n<|body_start_4|>\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name=2)\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name='s p a c e ! ! ! !')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000150", "length_bytes": 10508, "license_type": "no_license", "methods": [{"docstring": "This checks to make sure that you can set new services.", "name": "test_set_new_service_1", "signature": "def test_set_new_service_1(self)"}, {"docstring": "Checks to make sure that the service added and the rate given, to it are the same as the ones returned.", "name": "test_set_new_service_2", "signature": "def test_set_new_service_2(self)"}, {"docstring": "Checks that the function fail if given parameters of the wrong types.", "name": "test_set_new_service_fail", "signature": "def test_set_new_service_fail(self)"}, {"docstring": "Checks that the services being added are the same as the ones being returned.", "name": "test_get_service_by_type_name", "signature": "def test_get_service_by_type_name(self)"}, {"docstring": "Checks that the function fails if given parameters of the wrong types.", "name": "test_get_service_by_type_name_fail", "signature": "def test_get_service_by_type_name_fail(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_val_001293", "prompt": "Implement the Python class `RegionFunctionTests` described below.\n\nClass description:\nImplement the RegionFunctionTests class.\n\nMethod signatures and docstrings:\n- def test_set_new_service_1(self): This checks to make sure that you can set new services.\n- def test_set_new_service_2(self): Checks to make sure that the service added and the rate given, to it are the same as the ones returned.\n- def test_set_new_service_fail(self): Checks that the function fail if given parameters of the wrong types.\n- def test_get_service_by_type_name(self): Checks that the services being added are the same as the ones being returned.\n- def test_get_service_by_type_name_fail(self): Checks that the function fails if given parameters of the wrong types.", "prompted_full_text": "Implement the Python class `RegionFunctionTests` described below.\n\nClass description:\nImplement the RegionFunctionTests class.\n\nMethod signatures and docstrings:\n- def test_set_new_service_1(self): This checks to make sure that you can set new services.\n- def test_set_new_service_2(self): Checks to make sure that the service added and the rate given, to it are the same as the ones returned.\n- def test_set_new_service_fail(self): Checks that the function fail if given parameters of the wrong types.\n- def test_get_service_by_type_name(self): Checks that the services being added are the same as the ones being returned.\n- def test_get_service_by_type_name_fail(self): Checks that the function fails if given parameters of the wrong types.\n\n<|skeleton|>\nclass RegionFunctionTests:\n\n def test_set_new_service_1(self):\n \"\"\"This checks to make sure that you can set new services.\"\"\"\n <|body_0|>\n\n def test_set_new_service_2(self):\n \"\"\"Checks to make sure that the service added and the rate given, to it are the same as the ones returned.\"\"\"\n <|body_1|>\n\n def test_set_new_service_fail(self):\n \"\"\"Checks that the function fail if given parameters of the wrong types.\"\"\"\n <|body_2|>\n\n def test_get_service_by_type_name(self):\n \"\"\"Checks that the services being added are the same as the ones being returned.\"\"\"\n <|body_3|>\n\n def test_get_service_by_type_name_fail(self):\n \"\"\"Checks that the function fails if given parameters of the wrong types.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='morethings', pretty_name='More Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n<|end_body_0|>\n\n<|body_start_1|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n rate = loc.get_service_by_type_name('things').get_current_rate()\n self.assertEquals(rate.rate, 0.5)\n<|end_body_1|>\n\n<|body_start_2|>\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.set_new_service(service_type=5, start_rate=0.5)\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate='not a number')\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate=-5)\n<|end_body_2|>\n\n<|body_start_3|>\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='things2', pretty_name='things 2', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n self.assertEquals(loc.get_service_by_type_name('things2').service_type, serv2)\n<|end_body_3|>\n\n<|body_start_4|>\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name=2)\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name='s p a c e ! ! ! !')\n<|end_body_4|>\n", "revision_id": "d6b288e632ccfcd7c8e88ff1e5f496fb8e525710", "skeleton": "<|skeleton|>\nclass RegionFunctionTests:\n\n def test_set_new_service_1(self):\n \"\"\"This checks to make sure that you can set new services.\"\"\"\n <|body_0|>\n\n def test_set_new_service_2(self):\n \"\"\"Checks to make sure that the service added and the rate given, to it are the same as the ones returned.\"\"\"\n <|body_1|>\n\n def test_set_new_service_fail(self):\n \"\"\"Checks that the function fail if given parameters of the wrong types.\"\"\"\n <|body_2|>\n\n def test_get_service_by_type_name(self):\n \"\"\"Checks that the services being added are the same as the ones being returned.\"\"\"\n <|body_3|>\n\n def test_get_service_by_type_name_fail(self):\n \"\"\"Checks that the function fails if given parameters of the wrong types.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RegionFunctionTests:\n def test_set_new_service_1(self):\n \"\"\"This checks to make sure that you can set new services.\"\"\"\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='morethings', pretty_name='More Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n\n def test_set_new_service_2(self):\n \"\"\"Checks to make sure that the service added and the rate given, to it are the same as the ones returned.\"\"\"\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n rate = loc.get_service_by_type_name('things').get_current_rate()\n self.assertEquals(rate.rate, 0.5)\n\n def test_set_new_service_fail(self):\n \"\"\"Checks that the function fail if given parameters of the wrong types.\"\"\"\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.set_new_service(service_type=5, start_rate=0.5)\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate='not a number')\n with self.assertRaises(TypeError):\n loc.set_new_service(name='things', description='stuff', start_rate=-5)\n\n def test_get_service_by_type_name(self):\n \"\"\"Checks that the services being added are the same as the ones being returned.\"\"\"\n serv1 = Service_Type.objects.create(name='things', pretty_name='Things', description='stuff')\n serv2 = Service_Type.objects.create(name='things2', pretty_name='things 2', description='stuff')\n loc = Region.objects.create(name='place')\n loc.set_new_service(serv1, start_rate=0.5)\n loc.set_new_service(serv2, start_rate=0.5)\n self.assertEquals(loc.get_service_by_type_name('things').service_type, serv1)\n self.assertEquals(loc.get_service_by_type_name('things2').service_type, serv2)\n\n def test_get_service_by_type_name_fail(self):\n \"\"\"Checks that the function fails if given parameters of the wrong types.\"\"\"\n loc = Region.objects.create(name='place')\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name=2)\n with self.assertRaises(TypeError):\n loc.get_service_by_type_name(name='s p a c e ! ! ! !')\n", "source": "the_stack_v2_python_sparse", "source_path": "clerk-vagrant-test/Clerk/clerk/tests.py", "source_repo": "unexceptable/clerk", "split": "test", "star_events_count": 0} {"blob_id": "51bdd48550760c0fccfcbc24a8628aaa897fb415", "bodies": ["for p, res in self.basicOps:\n tree = rdparse.parse(p, matchers.matchinfo)\n self.assertEqual(str(tree), res)", "for p, res in self.basicOps:\n s1 = str(rdparse.parse(p, matchers.matchinfo))\n s2 = str(rdparse.parse(s1, matchers.matchinfo))\n self.assertEqual(s1, s2, '%s did not stabilize' % (p,))", "for testline in self.knownParseEvals:\n pstr = testline[0]\n pres = testline[1]\n root = rdparse.parse(pstr, matchers.matchinfo)\n self.assertEqual(str(root), pres)\n r2 = rdparse.parse(pres, matchers.matchinfo)\n self.assertEqual(str(r2), pres, '%s did not reparse identical' % (pres,))\n for ip, res in testline[2:]:\n hi = makehi(rip=ip)\n hi.addclass('frotz')\n self.assertEqual(root.eval(hi), res, '%s failed on host %s' % (pstr, ip))"], "bodies_text": "<|body_start_0|>\n for p, res in self.basicOps:\n tree = rdparse.parse(p, matchers.matchinfo)\n self.assertEqual(str(tree), res)\n<|end_body_0|>\n\n<|body_start_1|>\n for p, res in self.basicOps:\n s1 = str(rdparse.parse(p, matchers.matchinfo))\n s2 = str(rdparse.parse(s1, matchers.matchinfo))\n self.assertEqual(s1, s2, '%s did not stabilize' % (p,))\n<|end_body_1|>\n\n<|body_start_2|>\n for testline in self.knownParseEvals:\n pstr = testline[0]\n pres = testline[1]\n root = rdparse.parse(pstr, matchers.matchinfo)\n self.assertEqual(str(root), pres)\n r2 = rdparse.parse(pres, matchers.matchinfo)\n self.assertEqual(str(r2), pres, '%s did not reparse identical' % (pres,))\n for ip, res in testline[2:]:\n hi = makehi(rip=ip)\n hi.addclass('frotz')\n self.assertEqual(root.eval(hi), res, '%s failed on host %s' % (pstr, ip))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "testParsingEvaling", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass testParsingEvaling:\n\n def testParsedRepr(self):\n \"\"\"Test the ability of all matchers to be parsed and to report themselves.\"\"\"\n <|body_0|>\n\n def testStableRepr(self):\n \"\"\"Test that reparsing the string version of a matcher gets the same result.\"\"\"\n <|body_1|>\n\n def testParseEval(self):\n \"\"\"Test that we properly parse and evaluate known strings.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for p, res in self.basicOps:\n tree = rdparse.parse(p, matchers.matchinfo)\n self.assertEqual(str(tree), res)\n<|end_body_0|>\n\n<|body_start_1|>\n for p, res in self.basicOps:\n s1 = str(rdparse.parse(p, matchers.matchinfo))\n s2 = str(rdparse.parse(s1, matchers.matchinfo))\n self.assertEqual(s1, s2, '%s did not stabilize' % (p,))\n<|end_body_1|>\n\n<|body_start_2|>\n for testline in self.knownParseEvals:\n pstr = testline[0]\n pres = testline[1]\n root = rdparse.parse(pstr, matchers.matchinfo)\n self.assertEqual(str(root), pres)\n r2 = rdparse.parse(pres, matchers.matchinfo)\n self.assertEqual(str(r2), pres, '%s did not reparse identical' % (pres,))\n for ip, res in testline[2:]:\n hi = makehi(rip=ip)\n hi.addclass('frotz')\n self.assertEqual(root.eval(hi), res, '%s failed on host %s' % (pstr, ip))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000151", "length_bytes": 17813, "license_type": "no_license", "methods": [{"docstring": "Test the ability of all matchers to be parsed and to report themselves.", "name": "testParsedRepr", "signature": "def testParsedRepr(self)"}, {"docstring": "Test that reparsing the string version of a matcher gets the same result.", "name": "testStableRepr", "signature": "def testStableRepr(self)"}, {"docstring": "Test that we properly parse and evaluate known strings.", "name": "testParseEval", "signature": "def testParseEval(self)"}], "n_methods": 3, "prompt": "Implement the Python class `testParsingEvaling` described below.\n\nClass description:\nImplement the testParsingEvaling class.\n\nMethod signatures and docstrings:\n- def testParsedRepr(self): Test the ability of all matchers to be parsed and to report themselves.\n- def testStableRepr(self): Test that reparsing the string version of a matcher gets the same result.\n- def testParseEval(self): Test that we properly parse and evaluate known strings.", "prompted_full_text": "Implement the Python class `testParsingEvaling` described below.\n\nClass description:\nImplement the testParsingEvaling class.\n\nMethod signatures and docstrings:\n- def testParsedRepr(self): Test the ability of all matchers to be parsed and to report themselves.\n- def testStableRepr(self): Test that reparsing the string version of a matcher gets the same result.\n- def testParseEval(self): Test that we properly parse and evaluate known strings.\n\n<|skeleton|>\nclass testParsingEvaling:\n\n def testParsedRepr(self):\n \"\"\"Test the ability of all matchers to be parsed and to report themselves.\"\"\"\n <|body_0|>\n\n def testStableRepr(self):\n \"\"\"Test that reparsing the string version of a matcher gets the same result.\"\"\"\n <|body_1|>\n\n def testParseEval(self):\n \"\"\"Test that we properly parse and evaluate known strings.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for p, res in self.basicOps:\n tree = rdparse.parse(p, matchers.matchinfo)\n self.assertEqual(str(tree), res)\n<|end_body_0|>\n\n<|body_start_1|>\n for p, res in self.basicOps:\n s1 = str(rdparse.parse(p, matchers.matchinfo))\n s2 = str(rdparse.parse(s1, matchers.matchinfo))\n self.assertEqual(s1, s2, '%s did not stabilize' % (p,))\n<|end_body_1|>\n\n<|body_start_2|>\n for testline in self.knownParseEvals:\n pstr = testline[0]\n pres = testline[1]\n root = rdparse.parse(pstr, matchers.matchinfo)\n self.assertEqual(str(root), pres)\n r2 = rdparse.parse(pres, matchers.matchinfo)\n self.assertEqual(str(r2), pres, '%s did not reparse identical' % (pres,))\n for ip, res in testline[2:]:\n hi = makehi(rip=ip)\n hi.addclass('frotz')\n self.assertEqual(root.eval(hi), res, '%s failed on host %s' % (pstr, ip))\n<|end_body_2|>\n", "revision_id": "41341606e831a42ba36f8e64640e98f098bf0489", "skeleton": "<|skeleton|>\nclass testParsingEvaling:\n\n def testParsedRepr(self):\n \"\"\"Test the ability of all matchers to be parsed and to report themselves.\"\"\"\n <|body_0|>\n\n def testStableRepr(self):\n \"\"\"Test that reparsing the string version of a matcher gets the same result.\"\"\"\n <|body_1|>\n\n def testParseEval(self):\n \"\"\"Test that we properly parse and evaluate known strings.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class testParsingEvaling:\n def testParsedRepr(self):\n \"\"\"Test the ability of all matchers to be parsed and to report themselves.\"\"\"\n for p, res in self.basicOps:\n tree = rdparse.parse(p, matchers.matchinfo)\n self.assertEqual(str(tree), res)\n\n def testStableRepr(self):\n \"\"\"Test that reparsing the string version of a matcher gets the same result.\"\"\"\n for p, res in self.basicOps:\n s1 = str(rdparse.parse(p, matchers.matchinfo))\n s2 = str(rdparse.parse(s1, matchers.matchinfo))\n self.assertEqual(s1, s2, '%s did not stabilize' % (p,))\n\n def testParseEval(self):\n \"\"\"Test that we properly parse and evaluate known strings.\"\"\"\n for testline in self.knownParseEvals:\n pstr = testline[0]\n pres = testline[1]\n root = rdparse.parse(pstr, matchers.matchinfo)\n self.assertEqual(str(root), pres)\n r2 = rdparse.parse(pres, matchers.matchinfo)\n self.assertEqual(str(r2), pres, '%s did not reparse identical' % (pres,))\n for ip, res in testline[2:]:\n hi = makehi(rip=ip)\n hi.addclass('frotz')\n self.assertEqual(root.eval(hi), res, '%s failed on host %s' % (pstr, ip))\n", "source": "the_stack_v2_python_sparse", "source_path": "test_matchers.py", "source_repo": "siebenmann/portnanny", "split": "test", "star_events_count": 2} {"blob_id": "7937eab41ff5b687237bc2a7b6b9837ccb63c797", "bodies": ["super().__init__(**kwargs)\nself.problem_reference = GetClosestPersonOrRefillProblem\nself.problem = None", "grid, remaining_gas = perception\nplayer_values = [self.player_number, self.player_number + 7]\nfor i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] in player_values:\n return ((i, j), remaining_gas)\nreturn None", "self.initial_state = self.__state_from_perception(perception)\ngrid, _ = perception\nnew_grid = copy.deepcopy(grid)\nself.problem = problem(new_grid, self.initial_state, **kwargs)", "goals = self.problem.get_people_position()\nstate = node.state[0]\nbest_distance = util.INT_INFTY\nfor people in goals:\n manhattan = abs(state[0] - people[0]) + abs(state[1] - people[1])\n if manhattan < best_distance:\n best_distance = manhattan\nreturn best_distance", "self.start_agent(perception, self.problem_reference, tank_capacity=self.tank_capacity)\nnode = util.a_star(self.problem, self.manhattan_distance)\nif not node:\n return 'STOP'\naction = node.action\nlast_action = None\nwhile node.parent is not None:\n node = node.parent\n last_action = action\n action = node.action\nreturn last_action"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self.problem_reference = GetClosestPersonOrRefillProblem\n self.problem = None\n<|end_body_0|>\n\n<|body_start_1|>\n grid, remaining_gas = perception\n player_values = [self.player_number, self.player_number + 7]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] in player_values:\n return ((i, j), remaining_gas)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n self.initial_state = self.__state_from_perception(perception)\n grid, _ = perception\n new_grid = copy.deepcopy(grid)\n self.problem = problem(new_grid, self.initial_state, **kwargs)\n<|end_body_2|>\n\n<|body_start_3|>\n goals = self.problem.get_people_position()\n state = node.state[0]\n best_distance = util.INT_INFTY\n for people in goals:\n manhattan = abs(state[0] - people[0]) + abs(state[1] - people[1])\n if manhattan < best_distance:\n best_distance = manhattan\n return best_distance\n<|end_body_3|>\n\n<|body_start_4|>\n self.start_agent(perception, self.problem_reference, tank_capacity=self.tank_capacity)\n node = util.a_star(self.problem, self.manhattan_distance)\n if not node:\n return 'STOP'\n action = node.action\n last_action = None\n while node.parent is not None:\n node = node.parent\n last_action = action\n action = node.action\n return last_action\n<|end_body_4|>\n", "class_docstring": "Agent Class that implements a planning agent The GetClosestPersonOrRefill class is a subclass of Agent that implements a specific agent whose objective is to collect the closest available person or refuelling, by using A* search with manhattan distance as heuristic. On each step this agent perform a new A* search until the problem reached a goal state. Also, it always process the actions in the following order: 'UP', 'RIGHT', 'DOWN', 'LEFT', 'REFILL', 'STOP' and in case of ties when computing the heuristic, it uses a FIFO tie-breaker heuristic to pop from the priority queue. .. notes:: The code below is provided to you as a guide and it is completely functional. Since you will only use it to", "class_name": "GetClosestPersonOrRefillAgent", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GetClosestPersonOrRefillAgent:\n \"\"\"Agent Class that implements a planning agent The GetClosestPersonOrRefill class is a subclass of Agent that implements a specific agent whose objective is to collect the closest available person or refuelling, by using A* search with manhattan distance as heuristic. On each step this agent perform a new A* search until the problem reached a goal state. Also, it always process the actions in the following order: 'UP', 'RIGHT', 'DOWN', 'LEFT', 'REFILL', 'STOP' and in case of ties when computing the heuristic, it uses a FIFO tie-breaker heuristic to pop from the priority queue. .. notes:: The code below is provided to you as a guide and it is completely functional. Since you will only use it to\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"As stated before, all information that will be passed during the initialization is packed in the kwargs and, because of that, unless you decided to implement fancy attributes here, you can safely pass the kwargs dictionary directly to the superclass constructor. For pedagogical reasons, we decided that we will instatiate additional attributes here that will be later set via start_agent method.\"\"\"\n <|body_0|>\n\n def __state_from_perception(self, perception):\n \"\"\"Private method to help to convert a perception into a state This is a helper method that converts a perception passed from the environment into a state to be used in the search problem. :param perception: The perception your agent acquires from the environment. :type perception: Problem dependent (for this programming assignment a tuple with the grid matrix and the remaining fuel for your agent) :return: A problem state according with your conception of problem. (E.g. for GetClosestPersonOrRefillProblem, we chose a state as a tuple with the agent coordinates and its remaining fuel)\"\"\"\n <|body_1|>\n\n def start_agent(self, perception, problem, **kwargs):\n \"\"\"Initialize all non-default attributes in the agent This is a helper method to allow the instantiation of all non-default attributes from this class. In this particular case, we are instantiating a search problem according with the perception and the specs provided.\"\"\"\n <|body_2|>\n\n def manhattan_distance(self, node):\n \"\"\"Heuristic to be used by the A* algorithm\"\"\"\n <|body_3|>\n\n def get_action(self, perception):\n \"\"\"This is the main method for all your agents Along with the __init__, you must at least implement this method in all your agents to make them work properly. This method receives a perception from the environment and returns an action after performing the A* search with manhattan_distance as heuristics.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.problem_reference = GetClosestPersonOrRefillProblem\n self.problem = None\n<|end_body_0|>\n\n<|body_start_1|>\n grid, remaining_gas = perception\n player_values = [self.player_number, self.player_number + 7]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] in player_values:\n return ((i, j), remaining_gas)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n self.initial_state = self.__state_from_perception(perception)\n grid, _ = perception\n new_grid = copy.deepcopy(grid)\n self.problem = problem(new_grid, self.initial_state, **kwargs)\n<|end_body_2|>\n\n<|body_start_3|>\n goals = self.problem.get_people_position()\n state = node.state[0]\n best_distance = util.INT_INFTY\n for people in goals:\n manhattan = abs(state[0] - people[0]) + abs(state[1] - people[1])\n if manhattan < best_distance:\n best_distance = manhattan\n return best_distance\n<|end_body_3|>\n\n<|body_start_4|>\n self.start_agent(perception, self.problem_reference, tank_capacity=self.tank_capacity)\n node = util.a_star(self.problem, self.manhattan_distance)\n if not node:\n return 'STOP'\n action = node.action\n last_action = None\n while node.parent is not None:\n node = node.parent\n last_action = action\n action = node.action\n return last_action\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000152", "length_bytes": 44047, "license_type": "permissive", "methods": [{"docstring": "As stated before, all information that will be passed during the initialization is packed in the kwargs and, because of that, unless you decided to implement fancy attributes here, you can safely pass the kwargs dictionary directly to the superclass constructor. For pedagogical reasons, we decided that we will instatiate additional attributes here that will be later set via start_agent method.", "name": "__init__", "signature": "def __init__(self, **kwargs)"}, {"docstring": "Private method to help to convert a perception into a state This is a helper method that converts a perception passed from the environment into a state to be used in the search problem. :param perception: The perception your agent acquires from the environment. :type perception: Problem dependent (for this programming assignment a tuple with the grid matrix and the remaining fuel for your agent) :return: A problem state according with your conception of problem. (E.g. for GetClosestPersonOrRefillProblem, we chose a state as a tuple with the agent coordinates and its remaining fuel)", "name": "__state_from_perception", "signature": "def __state_from_perception(self, perception)"}, {"docstring": "Initialize all non-default attributes in the agent This is a helper method to allow the instantiation of all non-default attributes from this class. In this particular case, we are instantiating a search problem according with the perception and the specs provided.", "name": "start_agent", "signature": "def start_agent(self, perception, problem, **kwargs)"}, {"docstring": "Heuristic to be used by the A* algorithm", "name": "manhattan_distance", "signature": "def manhattan_distance(self, node)"}, {"docstring": "This is the main method for all your agents Along with the __init__, you must at least implement this method in all your agents to make them work properly. This method receives a perception from the environment and returns an action after performing the A* search with manhattan_distance as heuristics.", "name": "get_action", "signature": "def get_action(self, perception)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_test_000106", "prompt": "Implement the Python class `GetClosestPersonOrRefillAgent` described below.\n\nClass description:\nAgent Class that implements a planning agent The GetClosestPersonOrRefill class is a subclass of Agent that implements a specific agent whose objective is to collect the closest available person or refuelling, by using A* search with manhattan distance as heuristic. On each step this agent perform a new A* search until the problem reached a goal state. Also, it always process the actions in the following order: 'UP', 'RIGHT', 'DOWN', 'LEFT', 'REFILL', 'STOP' and in case of ties when computing the heuristic, it uses a FIFO tie-breaker heuristic to pop from the priority queue. .. notes:: The code below is provided to you as a guide and it is completely functional. Since you will only use it to\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): As stated before, all information that will be passed during the initialization is packed in the kwargs and, because of that, unless you decided to implement fancy attributes here, you can safely pass the kwargs dictionary directly to the superclass constructor. For pedagogical reasons, we decided that we will instatiate additional attributes here that will be later set via start_agent method.\n- def __state_from_perception(self, perception): Private method to help to convert a perception into a state This is a helper method that converts a perception passed from the environment into a state to be used in the search problem. :param perception: The perception your agent acquires from the environment. :type perception: Problem dependent (for this programming assignment a tuple with the grid matrix and the remaining fuel for your agent) :return: A problem state according with your conception of problem. (E.g. for GetClosestPersonOrRefillProblem, we chose a state as a tuple with the agent coordinates and its remaining fuel)\n- def start_agent(self, perception, problem, **kwargs): Initialize all non-default attributes in the agent This is a helper method to allow the instantiation of all non-default attributes from this class. In this particular case, we are instantiating a search problem according with the perception and the specs provided.\n- def manhattan_distance(self, node): Heuristic to be used by the A* algorithm\n- def get_action(self, perception): This is the main method for all your agents Along with the __init__, you must at least implement this method in all your agents to make them work properly. This method receives a perception from the environment and returns an action after performing the A* search with manhattan_distance as heuristics.", "prompted_full_text": "Implement the Python class `GetClosestPersonOrRefillAgent` described below.\n\nClass description:\nAgent Class that implements a planning agent The GetClosestPersonOrRefill class is a subclass of Agent that implements a specific agent whose objective is to collect the closest available person or refuelling, by using A* search with manhattan distance as heuristic. On each step this agent perform a new A* search until the problem reached a goal state. Also, it always process the actions in the following order: 'UP', 'RIGHT', 'DOWN', 'LEFT', 'REFILL', 'STOP' and in case of ties when computing the heuristic, it uses a FIFO tie-breaker heuristic to pop from the priority queue. .. notes:: The code below is provided to you as a guide and it is completely functional. Since you will only use it to\n\nMethod signatures and docstrings:\n- def __init__(self, **kwargs): As stated before, all information that will be passed during the initialization is packed in the kwargs and, because of that, unless you decided to implement fancy attributes here, you can safely pass the kwargs dictionary directly to the superclass constructor. For pedagogical reasons, we decided that we will instatiate additional attributes here that will be later set via start_agent method.\n- def __state_from_perception(self, perception): Private method to help to convert a perception into a state This is a helper method that converts a perception passed from the environment into a state to be used in the search problem. :param perception: The perception your agent acquires from the environment. :type perception: Problem dependent (for this programming assignment a tuple with the grid matrix and the remaining fuel for your agent) :return: A problem state according with your conception of problem. (E.g. for GetClosestPersonOrRefillProblem, we chose a state as a tuple with the agent coordinates and its remaining fuel)\n- def start_agent(self, perception, problem, **kwargs): Initialize all non-default attributes in the agent This is a helper method to allow the instantiation of all non-default attributes from this class. In this particular case, we are instantiating a search problem according with the perception and the specs provided.\n- def manhattan_distance(self, node): Heuristic to be used by the A* algorithm\n- def get_action(self, perception): This is the main method for all your agents Along with the __init__, you must at least implement this method in all your agents to make them work properly. This method receives a perception from the environment and returns an action after performing the A* search with manhattan_distance as heuristics.\n\n<|skeleton|>\nclass GetClosestPersonOrRefillAgent:\n \"\"\"Agent Class that implements a planning agent The GetClosestPersonOrRefill class is a subclass of Agent that implements a specific agent whose objective is to collect the closest available person or refuelling, by using A* search with manhattan distance as heuristic. On each step this agent perform a new A* search until the problem reached a goal state. Also, it always process the actions in the following order: 'UP', 'RIGHT', 'DOWN', 'LEFT', 'REFILL', 'STOP' and in case of ties when computing the heuristic, it uses a FIFO tie-breaker heuristic to pop from the priority queue. .. notes:: The code below is provided to you as a guide and it is completely functional. Since you will only use it to\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"As stated before, all information that will be passed during the initialization is packed in the kwargs and, because of that, unless you decided to implement fancy attributes here, you can safely pass the kwargs dictionary directly to the superclass constructor. For pedagogical reasons, we decided that we will instatiate additional attributes here that will be later set via start_agent method.\"\"\"\n <|body_0|>\n\n def __state_from_perception(self, perception):\n \"\"\"Private method to help to convert a perception into a state This is a helper method that converts a perception passed from the environment into a state to be used in the search problem. :param perception: The perception your agent acquires from the environment. :type perception: Problem dependent (for this programming assignment a tuple with the grid matrix and the remaining fuel for your agent) :return: A problem state according with your conception of problem. (E.g. for GetClosestPersonOrRefillProblem, we chose a state as a tuple with the agent coordinates and its remaining fuel)\"\"\"\n <|body_1|>\n\n def start_agent(self, perception, problem, **kwargs):\n \"\"\"Initialize all non-default attributes in the agent This is a helper method to allow the instantiation of all non-default attributes from this class. In this particular case, we are instantiating a search problem according with the perception and the specs provided.\"\"\"\n <|body_2|>\n\n def manhattan_distance(self, node):\n \"\"\"Heuristic to be used by the A* algorithm\"\"\"\n <|body_3|>\n\n def get_action(self, perception):\n \"\"\"This is the main method for all your agents Along with the __init__, you must at least implement this method in all your agents to make them work properly. This method receives a perception from the environment and returns an action after performing the A* search with manhattan_distance as heuristics.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self.problem_reference = GetClosestPersonOrRefillProblem\n self.problem = None\n<|end_body_0|>\n\n<|body_start_1|>\n grid, remaining_gas = perception\n player_values = [self.player_number, self.player_number + 7]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] in player_values:\n return ((i, j), remaining_gas)\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n self.initial_state = self.__state_from_perception(perception)\n grid, _ = perception\n new_grid = copy.deepcopy(grid)\n self.problem = problem(new_grid, self.initial_state, **kwargs)\n<|end_body_2|>\n\n<|body_start_3|>\n goals = self.problem.get_people_position()\n state = node.state[0]\n best_distance = util.INT_INFTY\n for people in goals:\n manhattan = abs(state[0] - people[0]) + abs(state[1] - people[1])\n if manhattan < best_distance:\n best_distance = manhattan\n return best_distance\n<|end_body_3|>\n\n<|body_start_4|>\n self.start_agent(perception, self.problem_reference, tank_capacity=self.tank_capacity)\n node = util.a_star(self.problem, self.manhattan_distance)\n if not node:\n return 'STOP'\n action = node.action\n last_action = None\n while node.parent is not None:\n node = node.parent\n last_action = action\n action = node.action\n return last_action\n<|end_body_4|>\n", "revision_id": "89b67b61817500aad359c64c7f43fcc2f1ef0698", "skeleton": "<|skeleton|>\nclass GetClosestPersonOrRefillAgent:\n \"\"\"Agent Class that implements a planning agent The GetClosestPersonOrRefill class is a subclass of Agent that implements a specific agent whose objective is to collect the closest available person or refuelling, by using A* search with manhattan distance as heuristic. On each step this agent perform a new A* search until the problem reached a goal state. Also, it always process the actions in the following order: 'UP', 'RIGHT', 'DOWN', 'LEFT', 'REFILL', 'STOP' and in case of ties when computing the heuristic, it uses a FIFO tie-breaker heuristic to pop from the priority queue. .. notes:: The code below is provided to you as a guide and it is completely functional. Since you will only use it to\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"As stated before, all information that will be passed during the initialization is packed in the kwargs and, because of that, unless you decided to implement fancy attributes here, you can safely pass the kwargs dictionary directly to the superclass constructor. For pedagogical reasons, we decided that we will instatiate additional attributes here that will be later set via start_agent method.\"\"\"\n <|body_0|>\n\n def __state_from_perception(self, perception):\n \"\"\"Private method to help to convert a perception into a state This is a helper method that converts a perception passed from the environment into a state to be used in the search problem. :param perception: The perception your agent acquires from the environment. :type perception: Problem dependent (for this programming assignment a tuple with the grid matrix and the remaining fuel for your agent) :return: A problem state according with your conception of problem. (E.g. for GetClosestPersonOrRefillProblem, we chose a state as a tuple with the agent coordinates and its remaining fuel)\"\"\"\n <|body_1|>\n\n def start_agent(self, perception, problem, **kwargs):\n \"\"\"Initialize all non-default attributes in the agent This is a helper method to allow the instantiation of all non-default attributes from this class. In this particular case, we are instantiating a search problem according with the perception and the specs provided.\"\"\"\n <|body_2|>\n\n def manhattan_distance(self, node):\n \"\"\"Heuristic to be used by the A* algorithm\"\"\"\n <|body_3|>\n\n def get_action(self, perception):\n \"\"\"This is the main method for all your agents Along with the __init__, you must at least implement this method in all your agents to make them work properly. This method receives a perception from the environment and returns an action after performing the A* search with manhattan_distance as heuristics.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GetClosestPersonOrRefillAgent:\n \"\"\"Agent Class that implements a planning agent The GetClosestPersonOrRefill class is a subclass of Agent that implements a specific agent whose objective is to collect the closest available person or refuelling, by using A* search with manhattan distance as heuristic. On each step this agent perform a new A* search until the problem reached a goal state. Also, it always process the actions in the following order: 'UP', 'RIGHT', 'DOWN', 'LEFT', 'REFILL', 'STOP' and in case of ties when computing the heuristic, it uses a FIFO tie-breaker heuristic to pop from the priority queue. .. notes:: The code below is provided to you as a guide and it is completely functional. Since you will only use it to\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"As stated before, all information that will be passed during the initialization is packed in the kwargs and, because of that, unless you decided to implement fancy attributes here, you can safely pass the kwargs dictionary directly to the superclass constructor. For pedagogical reasons, we decided that we will instatiate additional attributes here that will be later set via start_agent method.\"\"\"\n super().__init__(**kwargs)\n self.problem_reference = GetClosestPersonOrRefillProblem\n self.problem = None\n\n def __state_from_perception(self, perception):\n \"\"\"Private method to help to convert a perception into a state This is a helper method that converts a perception passed from the environment into a state to be used in the search problem. :param perception: The perception your agent acquires from the environment. :type perception: Problem dependent (for this programming assignment a tuple with the grid matrix and the remaining fuel for your agent) :return: A problem state according with your conception of problem. (E.g. for GetClosestPersonOrRefillProblem, we chose a state as a tuple with the agent coordinates and its remaining fuel)\"\"\"\n grid, remaining_gas = perception\n player_values = [self.player_number, self.player_number + 7]\n for i in range(len(grid)):\n for j in range(len(grid[0])):\n if grid[i][j] in player_values:\n return ((i, j), remaining_gas)\n return None\n\n def start_agent(self, perception, problem, **kwargs):\n \"\"\"Initialize all non-default attributes in the agent This is a helper method to allow the instantiation of all non-default attributes from this class. In this particular case, we are instantiating a search problem according with the perception and the specs provided.\"\"\"\n self.initial_state = self.__state_from_perception(perception)\n grid, _ = perception\n new_grid = copy.deepcopy(grid)\n self.problem = problem(new_grid, self.initial_state, **kwargs)\n\n def manhattan_distance(self, node):\n \"\"\"Heuristic to be used by the A* algorithm\"\"\"\n goals = self.problem.get_people_position()\n state = node.state[0]\n best_distance = util.INT_INFTY\n for people in goals:\n manhattan = abs(state[0] - people[0]) + abs(state[1] - people[1])\n if manhattan < best_distance:\n best_distance = manhattan\n return best_distance\n\n def get_action(self, perception):\n \"\"\"This is the main method for all your agents Along with the __init__, you must at least implement this method in all your agents to make them work properly. This method receives a perception from the environment and returns an action after performing the A* search with manhattan_distance as heuristics.\"\"\"\n self.start_agent(perception, self.problem_reference, tank_capacity=self.tank_capacity)\n node = util.a_star(self.problem, self.manhattan_distance)\n if not node:\n return 'STOP'\n action = node.action\n last_action = None\n while node.parent is not None:\n node = node.parent\n last_action = action\n action = node.action\n return last_action\n", "source": "the_stack_v2_python_sparse", "source_path": "EP2/ep2.py", "source_repo": "ricardokojo/MAC0425-2019", "split": "test", "star_events_count": 1} {"blob_id": "b9bd72fc1474847d19af88596afc3beaba1f8e83", "bodies": ["await ctx.message.delete()\ncode = ctx.message.content[len(ctx.prefix) + 3:]\ncode = code.replace('``', '`\\u200b`')\ncode = re.sub('\\n\\n+', '\\n\\n', code)\ncode = code.strip()\nlang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND (user_id=$2 OR user_id=$3)', ctx.guild.id, 0, ctx.author.id) or DEFAULT_LANG\ncode = '```{}\\n{}\\n```'.format(lang, code)\ncode += '*Paste by {0} - Click {1} to delete.*'.format(ctx.author.mention, DELETE_EMOJI)\nif len(code) > 2000:\n raise commands.CommandError('Code contents too long to paste.')\nmessage = await ctx.send(code)\nawait self.db.execute('INSERT INTO highlight_msg (guild_id, channel_id, user_id, message_id) VALUES ($1, $2, $3, $4)', ctx.guild.id, ctx.channel.id, ctx.author.id, message.id)\nawait message.add_reaction(DELETE_EMOJI)", "if payload.guild_id is None:\n return\nif str(payload.emoji) != DELETE_EMOJI or payload.user_id == self.bot.user.id:\n return\nif await self.db.execute('DELETE FROM highlight_msg WHERE user_id=$1 AND message_id=$2', payload.user_id, payload.message_id) == 'DELETE 0':\n return\nchannel = self.bot.get_channel(payload.channel_id)\nif channel is None:\n return\ntry:\n message = await channel.fetch_message(payload.message_id)\n await message.delete()\nexcept disnake.HTTPException:\n return", "if language is None:\n server_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n user_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n e = disnake.Embed(description='Do `.lang clear` to clear preference.')\n e.add_field(name='Server setting', value=f\"`{(DEFAULT_LANG + ' (default)' if server_lang is None else server_lang)}`\")\n e.add_field(name='Personal setting', value='Not set' if user_lang is None else f'`{user_lang}`')\n await ctx.send(embed=e)\n return\nif language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\nelse:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, ctx.author.id, language)\n await ctx.send(f\"Set your specific highlighting language to '{language}'.\")", "if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\nelse:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, 0, language)\n await ctx.send(f\"Set server-specific highlighting language to '{language}'.\")", "msg = 'To paste code snippets directly into the chat, use the highlight command:\\n```.hl *paste code here*```'\nif ctx.guild.id == AHK_GUILD_ID:\n msg += 'If you have a larger script you want to share, paste it to the AutoHotkey pastebin instead:\\nhttp://p.ahkscript.org/'\nawait ctx.send(msg)"], "bodies_text": "<|body_start_0|>\n await ctx.message.delete()\n code = ctx.message.content[len(ctx.prefix) + 3:]\n code = code.replace('``', '`\\u200b`')\n code = re.sub('\\n\\n+', '\\n\\n', code)\n code = code.strip()\n lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND (user_id=$2 OR user_id=$3)', ctx.guild.id, 0, ctx.author.id) or DEFAULT_LANG\n code = '```{}\\n{}\\n```'.format(lang, code)\n code += '*Paste by {0} - Click {1} to delete.*'.format(ctx.author.mention, DELETE_EMOJI)\n if len(code) > 2000:\n raise commands.CommandError('Code contents too long to paste.')\n message = await ctx.send(code)\n await self.db.execute('INSERT INTO highlight_msg (guild_id, channel_id, user_id, message_id) VALUES ($1, $2, $3, $4)', ctx.guild.id, ctx.channel.id, ctx.author.id, message.id)\n await message.add_reaction(DELETE_EMOJI)\n<|end_body_0|>\n\n<|body_start_1|>\n if payload.guild_id is None:\n return\n if str(payload.emoji) != DELETE_EMOJI or payload.user_id == self.bot.user.id:\n return\n if await self.db.execute('DELETE FROM highlight_msg WHERE user_id=$1 AND message_id=$2', payload.user_id, payload.message_id) == 'DELETE 0':\n return\n channel = self.bot.get_channel(payload.channel_id)\n if channel is None:\n return\n try:\n message = await channel.fetch_message(payload.message_id)\n await message.delete()\n except disnake.HTTPException:\n return\n<|end_body_1|>\n\n<|body_start_2|>\n if language is None:\n server_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n user_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n e = disnake.Embed(description='Do `.lang clear` to clear preference.')\n e.add_field(name='Server setting', value=f\"`{(DEFAULT_LANG + ' (default)' if server_lang is None else server_lang)}`\")\n e.add_field(name='Personal setting', value='Not set' if user_lang is None else f'`{user_lang}`')\n await ctx.send(embed=e)\n return\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, ctx.author.id, language)\n await ctx.send(f\"Set your specific highlighting language to '{language}'.\")\n<|end_body_2|>\n\n<|body_start_3|>\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, 0, language)\n await ctx.send(f\"Set server-specific highlighting language to '{language}'.\")\n<|end_body_3|>\n\n<|body_start_4|>\n msg = 'To paste code snippets directly into the chat, use the highlight command:\\n```.hl *paste code here*```'\n if ctx.guild.id == AHK_GUILD_ID:\n msg += 'If you have a larger script you want to share, paste it to the AutoHotkey pastebin instead:\\nhttp://p.ahkscript.org/'\n await ctx.send(msg)\n<|end_body_4|>\n", "class_docstring": "Create highlighted code-boxes with one command.", "class_name": "Highlighter", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Highlighter:\n \"\"\"Create highlighted code-boxes with one command.\"\"\"\n\n async def hl(self, ctx, *, code):\n \"\"\"Highlight some code.\"\"\"\n <|body_0|>\n\n async def on_raw_reaction_add(self, payload):\n \"\"\"Listens for raw reactions and removes a highlighted message if appropriate.\"\"\"\n <|body_1|>\n\n async def lang(self, ctx, *, language: lang_converter=None):\n \"\"\"Set your preferred highlighting language in this server.\"\"\"\n <|body_2|>\n\n async def serverlang(self, ctx, *, language: lang_converter):\n \"\"\"Set a guild-specific highlighting language. Can be overridden individually by users.\"\"\"\n <|body_3|>\n\n async def paste(self, ctx):\n \"\"\"Legacy, not removed because some people still use it instead of the newer tags in the tag system.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n await ctx.message.delete()\n code = ctx.message.content[len(ctx.prefix) + 3:]\n code = code.replace('``', '`\\u200b`')\n code = re.sub('\\n\\n+', '\\n\\n', code)\n code = code.strip()\n lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND (user_id=$2 OR user_id=$3)', ctx.guild.id, 0, ctx.author.id) or DEFAULT_LANG\n code = '```{}\\n{}\\n```'.format(lang, code)\n code += '*Paste by {0} - Click {1} to delete.*'.format(ctx.author.mention, DELETE_EMOJI)\n if len(code) > 2000:\n raise commands.CommandError('Code contents too long to paste.')\n message = await ctx.send(code)\n await self.db.execute('INSERT INTO highlight_msg (guild_id, channel_id, user_id, message_id) VALUES ($1, $2, $3, $4)', ctx.guild.id, ctx.channel.id, ctx.author.id, message.id)\n await message.add_reaction(DELETE_EMOJI)\n<|end_body_0|>\n\n<|body_start_1|>\n if payload.guild_id is None:\n return\n if str(payload.emoji) != DELETE_EMOJI or payload.user_id == self.bot.user.id:\n return\n if await self.db.execute('DELETE FROM highlight_msg WHERE user_id=$1 AND message_id=$2', payload.user_id, payload.message_id) == 'DELETE 0':\n return\n channel = self.bot.get_channel(payload.channel_id)\n if channel is None:\n return\n try:\n message = await channel.fetch_message(payload.message_id)\n await message.delete()\n except disnake.HTTPException:\n return\n<|end_body_1|>\n\n<|body_start_2|>\n if language is None:\n server_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n user_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n e = disnake.Embed(description='Do `.lang clear` to clear preference.')\n e.add_field(name='Server setting', value=f\"`{(DEFAULT_LANG + ' (default)' if server_lang is None else server_lang)}`\")\n e.add_field(name='Personal setting', value='Not set' if user_lang is None else f'`{user_lang}`')\n await ctx.send(embed=e)\n return\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, ctx.author.id, language)\n await ctx.send(f\"Set your specific highlighting language to '{language}'.\")\n<|end_body_2|>\n\n<|body_start_3|>\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, 0, language)\n await ctx.send(f\"Set server-specific highlighting language to '{language}'.\")\n<|end_body_3|>\n\n<|body_start_4|>\n msg = 'To paste code snippets directly into the chat, use the highlight command:\\n```.hl *paste code here*```'\n if ctx.guild.id == AHK_GUILD_ID:\n msg += 'If you have a larger script you want to share, paste it to the AutoHotkey pastebin instead:\\nhttp://p.ahkscript.org/'\n await ctx.send(msg)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000153", "length_bytes": 6795, "license_type": "permissive", "methods": [{"docstring": "Highlight some code.", "name": "hl", "signature": "async def hl(self, ctx, *, code)"}, {"docstring": "Listens for raw reactions and removes a highlighted message if appropriate.", "name": "on_raw_reaction_add", "signature": "async def on_raw_reaction_add(self, payload)"}, {"docstring": "Set your preferred highlighting language in this server.", "name": "lang", "signature": "async def lang(self, ctx, *, language: lang_converter=None)"}, {"docstring": "Set a guild-specific highlighting language. Can be overridden individually by users.", "name": "serverlang", "signature": "async def serverlang(self, ctx, *, language: lang_converter)"}, {"docstring": "Legacy, not removed because some people still use it instead of the newer tags in the tag system.", "name": "paste", "signature": "async def paste(self, ctx)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_021178", "prompt": "Implement the Python class `Highlighter` described below.\n\nClass description:\nCreate highlighted code-boxes with one command.\n\nMethod signatures and docstrings:\n- async def hl(self, ctx, *, code): Highlight some code.\n- async def on_raw_reaction_add(self, payload): Listens for raw reactions and removes a highlighted message if appropriate.\n- async def lang(self, ctx, *, language: lang_converter=None): Set your preferred highlighting language in this server.\n- async def serverlang(self, ctx, *, language: lang_converter): Set a guild-specific highlighting language. Can be overridden individually by users.\n- async def paste(self, ctx): Legacy, not removed because some people still use it instead of the newer tags in the tag system.", "prompted_full_text": "Implement the Python class `Highlighter` described below.\n\nClass description:\nCreate highlighted code-boxes with one command.\n\nMethod signatures and docstrings:\n- async def hl(self, ctx, *, code): Highlight some code.\n- async def on_raw_reaction_add(self, payload): Listens for raw reactions and removes a highlighted message if appropriate.\n- async def lang(self, ctx, *, language: lang_converter=None): Set your preferred highlighting language in this server.\n- async def serverlang(self, ctx, *, language: lang_converter): Set a guild-specific highlighting language. Can be overridden individually by users.\n- async def paste(self, ctx): Legacy, not removed because some people still use it instead of the newer tags in the tag system.\n\n<|skeleton|>\nclass Highlighter:\n \"\"\"Create highlighted code-boxes with one command.\"\"\"\n\n async def hl(self, ctx, *, code):\n \"\"\"Highlight some code.\"\"\"\n <|body_0|>\n\n async def on_raw_reaction_add(self, payload):\n \"\"\"Listens for raw reactions and removes a highlighted message if appropriate.\"\"\"\n <|body_1|>\n\n async def lang(self, ctx, *, language: lang_converter=None):\n \"\"\"Set your preferred highlighting language in this server.\"\"\"\n <|body_2|>\n\n async def serverlang(self, ctx, *, language: lang_converter):\n \"\"\"Set a guild-specific highlighting language. Can be overridden individually by users.\"\"\"\n <|body_3|>\n\n async def paste(self, ctx):\n \"\"\"Legacy, not removed because some people still use it instead of the newer tags in the tag system.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n await ctx.message.delete()\n code = ctx.message.content[len(ctx.prefix) + 3:]\n code = code.replace('``', '`\\u200b`')\n code = re.sub('\\n\\n+', '\\n\\n', code)\n code = code.strip()\n lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND (user_id=$2 OR user_id=$3)', ctx.guild.id, 0, ctx.author.id) or DEFAULT_LANG\n code = '```{}\\n{}\\n```'.format(lang, code)\n code += '*Paste by {0} - Click {1} to delete.*'.format(ctx.author.mention, DELETE_EMOJI)\n if len(code) > 2000:\n raise commands.CommandError('Code contents too long to paste.')\n message = await ctx.send(code)\n await self.db.execute('INSERT INTO highlight_msg (guild_id, channel_id, user_id, message_id) VALUES ($1, $2, $3, $4)', ctx.guild.id, ctx.channel.id, ctx.author.id, message.id)\n await message.add_reaction(DELETE_EMOJI)\n<|end_body_0|>\n\n<|body_start_1|>\n if payload.guild_id is None:\n return\n if str(payload.emoji) != DELETE_EMOJI or payload.user_id == self.bot.user.id:\n return\n if await self.db.execute('DELETE FROM highlight_msg WHERE user_id=$1 AND message_id=$2', payload.user_id, payload.message_id) == 'DELETE 0':\n return\n channel = self.bot.get_channel(payload.channel_id)\n if channel is None:\n return\n try:\n message = await channel.fetch_message(payload.message_id)\n await message.delete()\n except disnake.HTTPException:\n return\n<|end_body_1|>\n\n<|body_start_2|>\n if language is None:\n server_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n user_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n e = disnake.Embed(description='Do `.lang clear` to clear preference.')\n e.add_field(name='Server setting', value=f\"`{(DEFAULT_LANG + ' (default)' if server_lang is None else server_lang)}`\")\n e.add_field(name='Personal setting', value='Not set' if user_lang is None else f'`{user_lang}`')\n await ctx.send(embed=e)\n return\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, ctx.author.id, language)\n await ctx.send(f\"Set your specific highlighting language to '{language}'.\")\n<|end_body_2|>\n\n<|body_start_3|>\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, 0, language)\n await ctx.send(f\"Set server-specific highlighting language to '{language}'.\")\n<|end_body_3|>\n\n<|body_start_4|>\n msg = 'To paste code snippets directly into the chat, use the highlight command:\\n```.hl *paste code here*```'\n if ctx.guild.id == AHK_GUILD_ID:\n msg += 'If you have a larger script you want to share, paste it to the AutoHotkey pastebin instead:\\nhttp://p.ahkscript.org/'\n await ctx.send(msg)\n<|end_body_4|>\n", "revision_id": "b4640f633947f0a9fd902e8ddc95a4b667b85602", "skeleton": "<|skeleton|>\nclass Highlighter:\n \"\"\"Create highlighted code-boxes with one command.\"\"\"\n\n async def hl(self, ctx, *, code):\n \"\"\"Highlight some code.\"\"\"\n <|body_0|>\n\n async def on_raw_reaction_add(self, payload):\n \"\"\"Listens for raw reactions and removes a highlighted message if appropriate.\"\"\"\n <|body_1|>\n\n async def lang(self, ctx, *, language: lang_converter=None):\n \"\"\"Set your preferred highlighting language in this server.\"\"\"\n <|body_2|>\n\n async def serverlang(self, ctx, *, language: lang_converter):\n \"\"\"Set a guild-specific highlighting language. Can be overridden individually by users.\"\"\"\n <|body_3|>\n\n async def paste(self, ctx):\n \"\"\"Legacy, not removed because some people still use it instead of the newer tags in the tag system.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Highlighter:\n \"\"\"Create highlighted code-boxes with one command.\"\"\"\n\n async def hl(self, ctx, *, code):\n \"\"\"Highlight some code.\"\"\"\n await ctx.message.delete()\n code = ctx.message.content[len(ctx.prefix) + 3:]\n code = code.replace('``', '`\\u200b`')\n code = re.sub('\\n\\n+', '\\n\\n', code)\n code = code.strip()\n lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND (user_id=$2 OR user_id=$3)', ctx.guild.id, 0, ctx.author.id) or DEFAULT_LANG\n code = '```{}\\n{}\\n```'.format(lang, code)\n code += '*Paste by {0} - Click {1} to delete.*'.format(ctx.author.mention, DELETE_EMOJI)\n if len(code) > 2000:\n raise commands.CommandError('Code contents too long to paste.')\n message = await ctx.send(code)\n await self.db.execute('INSERT INTO highlight_msg (guild_id, channel_id, user_id, message_id) VALUES ($1, $2, $3, $4)', ctx.guild.id, ctx.channel.id, ctx.author.id, message.id)\n await message.add_reaction(DELETE_EMOJI)\n\n async def on_raw_reaction_add(self, payload):\n \"\"\"Listens for raw reactions and removes a highlighted message if appropriate.\"\"\"\n if payload.guild_id is None:\n return\n if str(payload.emoji) != DELETE_EMOJI or payload.user_id == self.bot.user.id:\n return\n if await self.db.execute('DELETE FROM highlight_msg WHERE user_id=$1 AND message_id=$2', payload.user_id, payload.message_id) == 'DELETE 0':\n return\n channel = self.bot.get_channel(payload.channel_id)\n if channel is None:\n return\n try:\n message = await channel.fetch_message(payload.message_id)\n await message.delete()\n except disnake.HTTPException:\n return\n\n async def lang(self, ctx, *, language: lang_converter=None):\n \"\"\"Set your preferred highlighting language in this server.\"\"\"\n if language is None:\n server_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n user_lang = await self.db.fetchval('SELECT lang FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n e = disnake.Embed(description='Do `.lang clear` to clear preference.')\n e.add_field(name='Server setting', value=f\"`{(DEFAULT_LANG + ' (default)' if server_lang is None else server_lang)}`\")\n e.add_field(name='Personal setting', value='Not set' if user_lang is None else f'`{user_lang}`')\n await ctx.send(embed=e)\n return\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, ctx.author.id)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, ctx.author.id, language)\n await ctx.send(f\"Set your specific highlighting language to '{language}'.\")\n\n async def serverlang(self, ctx, *, language: lang_converter):\n \"\"\"Set a guild-specific highlighting language. Can be overridden individually by users.\"\"\"\n if language == 'clear':\n ret = await self.db.execute('DELETE FROM highlight_lang WHERE guild_id=$1 AND user_id=$2', ctx.guild.id, 0)\n await ctx.send('No preference previously set' if ret == 'DELETE 0' else 'Preference cleared.')\n else:\n await self.db.execute('INSERT INTO highlight_lang (guild_id, user_id, lang) VALUES ($1, $2, $3) ON CONFLICT (guild_id, user_id) DO UPDATE SET lang=$3', ctx.guild.id, 0, language)\n await ctx.send(f\"Set server-specific highlighting language to '{language}'.\")\n\n async def paste(self, ctx):\n \"\"\"Legacy, not removed because some people still use it instead of the newer tags in the tag system.\"\"\"\n msg = 'To paste code snippets directly into the chat, use the highlight command:\\n```.hl *paste code here*```'\n if ctx.guild.id == AHK_GUILD_ID:\n msg += 'If you have a larger script you want to share, paste it to the AutoHotkey pastebin instead:\\nhttp://p.ahkscript.org/'\n await ctx.send(msg)\n", "source": "the_stack_v2_python_sparse", "source_path": "cogs/hl.py", "source_repo": "Run1e/AceBot", "split": "test", "star_events_count": 107} {"blob_id": "da93e0746aaedfb7d36cd653f0e250c0b86fe8cd", "bodies": ["Instrument.__init__(self, cle)\nself.emplacement = 'mains'\nself.positions = (1, 2)\nself.precision = 10\nself.calcul = 60\nself.etendre_editeur('r', 'précision', Uniligne, self, 'precision')\nself.etendre_editeur('ca', 'temps de calcul', Uniligne, self, 'calcul')", "precision = enveloppes['r']\nprecision.apercu = '{objet.precision}'\nprecision.prompt = 'Précision (en minutes) du sextant : '\nprecision.aide_courte = 'Entrez la |ent|précision|ff| du sextant, |cmd|1|ff| au minimum.\\nPlus le chiffre est bas, plus le sextant est précis.\\nNotez que le sextant est toujours précis en degrés, la précision\\nest en minutes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nPrécision actuelle : {objet.precision}'\nprecision.type = int\ncalcul = enveloppes['ca']\ncalcul.apercu = '{objet.calcul} secondes'\ncalcul.prompt = 'Temps de calcul nécessaire (en secondes) : '\ncalcul.aide_courte = 'Entrez le |ent|temps de calcul|ff| du sextant en secondes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nTemps actuelle : {objet.calcul} secondes'\ncalcul.type = int", "moi = Instrument.regarder(self, personnage)\npersonnage.envoyer_tip('Entrez la commande %point% pour faire le point.')\nreturn moi"], "bodies_text": "<|body_start_0|>\n Instrument.__init__(self, cle)\n self.emplacement = 'mains'\n self.positions = (1, 2)\n self.precision = 10\n self.calcul = 60\n self.etendre_editeur('r', 'précision', Uniligne, self, 'precision')\n self.etendre_editeur('ca', 'temps de calcul', Uniligne, self, 'calcul')\n<|end_body_0|>\n\n<|body_start_1|>\n precision = enveloppes['r']\n precision.apercu = '{objet.precision}'\n precision.prompt = 'Précision (en minutes) du sextant : '\n precision.aide_courte = 'Entrez la |ent|précision|ff| du sextant, |cmd|1|ff| au minimum.\\nPlus le chiffre est bas, plus le sextant est précis.\\nNotez que le sextant est toujours précis en degrés, la précision\\nest en minutes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nPrécision actuelle : {objet.precision}'\n precision.type = int\n calcul = enveloppes['ca']\n calcul.apercu = '{objet.calcul} secondes'\n calcul.prompt = 'Temps de calcul nécessaire (en secondes) : '\n calcul.aide_courte = 'Entrez le |ent|temps de calcul|ff| du sextant en secondes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nTemps actuelle : {objet.calcul} secondes'\n calcul.type = int\n<|end_body_1|>\n\n<|body_start_2|>\n moi = Instrument.regarder(self, personnage)\n personnage.envoyer_tip('Entrez la commande %point% pour faire le point.')\n return moi\n<|end_body_2|>\n", "class_docstring": "Type d'objet: sextant.", "class_name": "Sextant", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Sextant:\n \"\"\"Type d'objet: sextant.\"\"\"\n\n def __init__(self, cle=''):\n \"\"\"Constructeur de l'objet\"\"\"\n <|body_0|>\n\n def travailler_enveloppes(self, enveloppes):\n \"\"\"Travail sur les enveloppes\"\"\"\n <|body_1|>\n\n def regarder(self, personnage):\n \"\"\"Quand on regarde la sextant.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Instrument.__init__(self, cle)\n self.emplacement = 'mains'\n self.positions = (1, 2)\n self.precision = 10\n self.calcul = 60\n self.etendre_editeur('r', 'précision', Uniligne, self, 'precision')\n self.etendre_editeur('ca', 'temps de calcul', Uniligne, self, 'calcul')\n<|end_body_0|>\n\n<|body_start_1|>\n precision = enveloppes['r']\n precision.apercu = '{objet.precision}'\n precision.prompt = 'Précision (en minutes) du sextant : '\n precision.aide_courte = 'Entrez la |ent|précision|ff| du sextant, |cmd|1|ff| au minimum.\\nPlus le chiffre est bas, plus le sextant est précis.\\nNotez que le sextant est toujours précis en degrés, la précision\\nest en minutes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nPrécision actuelle : {objet.precision}'\n precision.type = int\n calcul = enveloppes['ca']\n calcul.apercu = '{objet.calcul} secondes'\n calcul.prompt = 'Temps de calcul nécessaire (en secondes) : '\n calcul.aide_courte = 'Entrez le |ent|temps de calcul|ff| du sextant en secondes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nTemps actuelle : {objet.calcul} secondes'\n calcul.type = int\n<|end_body_1|>\n\n<|body_start_2|>\n moi = Instrument.regarder(self, personnage)\n personnage.envoyer_tip('Entrez la commande %point% pour faire le point.')\n return moi\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000154", "length_bytes": 3821, "license_type": "permissive", "methods": [{"docstring": "Constructeur de l'objet", "name": "__init__", "signature": "def __init__(self, cle='')"}, {"docstring": "Travail sur les enveloppes", "name": "travailler_enveloppes", "signature": "def travailler_enveloppes(self, enveloppes)"}, {"docstring": "Quand on regarde la sextant.", "name": "regarder", "signature": "def regarder(self, personnage)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_034335", "prompt": "Implement the Python class `Sextant` described below.\n\nClass description:\nType d'objet: sextant.\n\nMethod signatures and docstrings:\n- def __init__(self, cle=''): Constructeur de l'objet\n- def travailler_enveloppes(self, enveloppes): Travail sur les enveloppes\n- def regarder(self, personnage): Quand on regarde la sextant.", "prompted_full_text": "Implement the Python class `Sextant` described below.\n\nClass description:\nType d'objet: sextant.\n\nMethod signatures and docstrings:\n- def __init__(self, cle=''): Constructeur de l'objet\n- def travailler_enveloppes(self, enveloppes): Travail sur les enveloppes\n- def regarder(self, personnage): Quand on regarde la sextant.\n\n<|skeleton|>\nclass Sextant:\n \"\"\"Type d'objet: sextant.\"\"\"\n\n def __init__(self, cle=''):\n \"\"\"Constructeur de l'objet\"\"\"\n <|body_0|>\n\n def travailler_enveloppes(self, enveloppes):\n \"\"\"Travail sur les enveloppes\"\"\"\n <|body_1|>\n\n def regarder(self, personnage):\n \"\"\"Quand on regarde la sextant.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Instrument.__init__(self, cle)\n self.emplacement = 'mains'\n self.positions = (1, 2)\n self.precision = 10\n self.calcul = 60\n self.etendre_editeur('r', 'précision', Uniligne, self, 'precision')\n self.etendre_editeur('ca', 'temps de calcul', Uniligne, self, 'calcul')\n<|end_body_0|>\n\n<|body_start_1|>\n precision = enveloppes['r']\n precision.apercu = '{objet.precision}'\n precision.prompt = 'Précision (en minutes) du sextant : '\n precision.aide_courte = 'Entrez la |ent|précision|ff| du sextant, |cmd|1|ff| au minimum.\\nPlus le chiffre est bas, plus le sextant est précis.\\nNotez que le sextant est toujours précis en degrés, la précision\\nest en minutes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nPrécision actuelle : {objet.precision}'\n precision.type = int\n calcul = enveloppes['ca']\n calcul.apercu = '{objet.calcul} secondes'\n calcul.prompt = 'Temps de calcul nécessaire (en secondes) : '\n calcul.aide_courte = 'Entrez le |ent|temps de calcul|ff| du sextant en secondes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nTemps actuelle : {objet.calcul} secondes'\n calcul.type = int\n<|end_body_1|>\n\n<|body_start_2|>\n moi = Instrument.regarder(self, personnage)\n personnage.envoyer_tip('Entrez la commande %point% pour faire le point.')\n return moi\n<|end_body_2|>\n", "revision_id": "7e93bff08cdf891352efba587e89c40f3b4a2301", "skeleton": "<|skeleton|>\nclass Sextant:\n \"\"\"Type d'objet: sextant.\"\"\"\n\n def __init__(self, cle=''):\n \"\"\"Constructeur de l'objet\"\"\"\n <|body_0|>\n\n def travailler_enveloppes(self, enveloppes):\n \"\"\"Travail sur les enveloppes\"\"\"\n <|body_1|>\n\n def regarder(self, personnage):\n \"\"\"Quand on regarde la sextant.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Sextant:\n \"\"\"Type d'objet: sextant.\"\"\"\n\n def __init__(self, cle=''):\n \"\"\"Constructeur de l'objet\"\"\"\n Instrument.__init__(self, cle)\n self.emplacement = 'mains'\n self.positions = (1, 2)\n self.precision = 10\n self.calcul = 60\n self.etendre_editeur('r', 'précision', Uniligne, self, 'precision')\n self.etendre_editeur('ca', 'temps de calcul', Uniligne, self, 'calcul')\n\n def travailler_enveloppes(self, enveloppes):\n \"\"\"Travail sur les enveloppes\"\"\"\n precision = enveloppes['r']\n precision.apercu = '{objet.precision}'\n precision.prompt = 'Précision (en minutes) du sextant : '\n precision.aide_courte = 'Entrez la |ent|précision|ff| du sextant, |cmd|1|ff| au minimum.\\nPlus le chiffre est bas, plus le sextant est précis.\\nNotez que le sextant est toujours précis en degrés, la précision\\nest en minutes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nPrécision actuelle : {objet.precision}'\n precision.type = int\n calcul = enveloppes['ca']\n calcul.apercu = '{objet.calcul} secondes'\n calcul.prompt = 'Temps de calcul nécessaire (en secondes) : '\n calcul.aide_courte = 'Entrez le |ent|temps de calcul|ff| du sextant en secondes.\\nEntrez |cmd|/|ff| pour revenir à la fenêtre parente.\\n\\nTemps actuelle : {objet.calcul} secondes'\n calcul.type = int\n\n def regarder(self, personnage):\n \"\"\"Quand on regarde la sextant.\"\"\"\n moi = Instrument.regarder(self, personnage)\n personnage.envoyer_tip('Entrez la commande %point% pour faire le point.')\n return moi\n", "source": "the_stack_v2_python_sparse", "source_path": "src/secondaires/navigation/types/sextant.py", "source_repo": "vincent-lg/tsunami", "split": "test", "star_events_count": 5} {"blob_id": "78e536f74ebd843eef4d5393849e0c0c6ca0892b", "bodies": ["if owner:\n self._owner = owner\nelse:\n self._owner = owner_kwargs\nreturn self", "if url_type in ['execute', 'guess']:\n if 'user' in kwargs and self.can_execute(kwargs['user'], task):\n url_name = '{}:{}'.format(namespace, self.name)\n return reverse(url_name)\nreturn super(Start, self).get_task_url(task, url_type=url_type, namespace=namespace, **kwargs)", "if task and task.status != STATUS.NEW:\n return False\nfrom django.contrib.auth import get_user_model\nif self._owner:\n if callable(self._owner):\n return self._owner(user)\n else:\n owner = get_user_model()._default_manager.get(**self._owner)\n return owner == user\nelif self._owner_permission:\n obj = None\n if self._owner_permission_obj:\n if callable(self._owner_permission_obj):\n obj = self._owner_permission_obj()\n else:\n obj = self._owner_permission_obj\n return user.has_perm(self._owner_permission, obj=obj)\nelse:\n '\\n No restriction\\n '\n return True"], "bodies_text": "<|body_start_0|>\n if owner:\n self._owner = owner\n else:\n self._owner = owner_kwargs\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n if url_type in ['execute', 'guess']:\n if 'user' in kwargs and self.can_execute(kwargs['user'], task):\n url_name = '{}:{}'.format(namespace, self.name)\n return reverse(url_name)\n return super(Start, self).get_task_url(task, url_type=url_type, namespace=namespace, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if task and task.status != STATUS.NEW:\n return False\n from django.contrib.auth import get_user_model\n if self._owner:\n if callable(self._owner):\n return self._owner(user)\n else:\n owner = get_user_model()._default_manager.get(**self._owner)\n return owner == user\n elif self._owner_permission:\n obj = None\n if self._owner_permission_obj:\n if callable(self._owner_permission_obj):\n obj = self._owner_permission_obj()\n else:\n obj = self._owner_permission_obj\n return user.has_perm(self._owner_permission, obj=obj)\n else:\n '\\n No restriction\\n '\n return True\n<|end_body_2|>\n", "class_docstring": "User task that starts flow from a django view.", "class_name": "Start", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Start:\n \"\"\"User task that starts flow from a django view.\"\"\"\n\n def Available(self, owner=None, **owner_kwargs):\n \"\"\"Make process start action available for the User. Accepts user lookup kwargs or callable predicate :: User -> bool:: .Available(username='employee') .Available(lambda user: user.is_super_user)\"\"\"\n <|body_0|>\n\n def get_task_url(self, task, url_type='guess', namespace='', **kwargs):\n \"\"\"\"Handle url_Type='execute'. If url_type is 'guess' and task can be executed by user, the 'execute' url is returned.\"\"\"\n <|body_1|>\n\n def can_execute(self, user, task=None):\n \"\"\"Check user permission to start a flow.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if owner:\n self._owner = owner\n else:\n self._owner = owner_kwargs\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n if url_type in ['execute', 'guess']:\n if 'user' in kwargs and self.can_execute(kwargs['user'], task):\n url_name = '{}:{}'.format(namespace, self.name)\n return reverse(url_name)\n return super(Start, self).get_task_url(task, url_type=url_type, namespace=namespace, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if task and task.status != STATUS.NEW:\n return False\n from django.contrib.auth import get_user_model\n if self._owner:\n if callable(self._owner):\n return self._owner(user)\n else:\n owner = get_user_model()._default_manager.get(**self._owner)\n return owner == user\n elif self._owner_permission:\n obj = None\n if self._owner_permission_obj:\n if callable(self._owner_permission_obj):\n obj = self._owner_permission_obj()\n else:\n obj = self._owner_permission_obj\n return user.has_perm(self._owner_permission, obj=obj)\n else:\n '\\n No restriction\\n '\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000155", "length_bytes": 10811, "license_type": "permissive", "methods": [{"docstring": "Make process start action available for the User. Accepts user lookup kwargs or callable predicate :: User -> bool:: .Available(username='employee') .Available(lambda user: user.is_super_user)", "name": "Available", "signature": "def Available(self, owner=None, **owner_kwargs)"}, {"docstring": "\"Handle url_Type='execute'. If url_type is 'guess' and task can be executed by user, the 'execute' url is returned.", "name": "get_task_url", "signature": "def get_task_url(self, task, url_type='guess', namespace='', **kwargs)"}, {"docstring": "Check user permission to start a flow.", "name": "can_execute", "signature": "def can_execute(self, user, task=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_017146", "prompt": "Implement the Python class `Start` described below.\n\nClass description:\nUser task that starts flow from a django view.\n\nMethod signatures and docstrings:\n- def Available(self, owner=None, **owner_kwargs): Make process start action available for the User. Accepts user lookup kwargs or callable predicate :: User -> bool:: .Available(username='employee') .Available(lambda user: user.is_super_user)\n- def get_task_url(self, task, url_type='guess', namespace='', **kwargs): \"Handle url_Type='execute'. If url_type is 'guess' and task can be executed by user, the 'execute' url is returned.\n- def can_execute(self, user, task=None): Check user permission to start a flow.", "prompted_full_text": "Implement the Python class `Start` described below.\n\nClass description:\nUser task that starts flow from a django view.\n\nMethod signatures and docstrings:\n- def Available(self, owner=None, **owner_kwargs): Make process start action available for the User. Accepts user lookup kwargs or callable predicate :: User -> bool:: .Available(username='employee') .Available(lambda user: user.is_super_user)\n- def get_task_url(self, task, url_type='guess', namespace='', **kwargs): \"Handle url_Type='execute'. If url_type is 'guess' and task can be executed by user, the 'execute' url is returned.\n- def can_execute(self, user, task=None): Check user permission to start a flow.\n\n<|skeleton|>\nclass Start:\n \"\"\"User task that starts flow from a django view.\"\"\"\n\n def Available(self, owner=None, **owner_kwargs):\n \"\"\"Make process start action available for the User. Accepts user lookup kwargs or callable predicate :: User -> bool:: .Available(username='employee') .Available(lambda user: user.is_super_user)\"\"\"\n <|body_0|>\n\n def get_task_url(self, task, url_type='guess', namespace='', **kwargs):\n \"\"\"\"Handle url_Type='execute'. If url_type is 'guess' and task can be executed by user, the 'execute' url is returned.\"\"\"\n <|body_1|>\n\n def can_execute(self, user, task=None):\n \"\"\"Check user permission to start a flow.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if owner:\n self._owner = owner\n else:\n self._owner = owner_kwargs\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n if url_type in ['execute', 'guess']:\n if 'user' in kwargs and self.can_execute(kwargs['user'], task):\n url_name = '{}:{}'.format(namespace, self.name)\n return reverse(url_name)\n return super(Start, self).get_task_url(task, url_type=url_type, namespace=namespace, **kwargs)\n<|end_body_1|>\n\n<|body_start_2|>\n if task and task.status != STATUS.NEW:\n return False\n from django.contrib.auth import get_user_model\n if self._owner:\n if callable(self._owner):\n return self._owner(user)\n else:\n owner = get_user_model()._default_manager.get(**self._owner)\n return owner == user\n elif self._owner_permission:\n obj = None\n if self._owner_permission_obj:\n if callable(self._owner_permission_obj):\n obj = self._owner_permission_obj()\n else:\n obj = self._owner_permission_obj\n return user.has_perm(self._owner_permission, obj=obj)\n else:\n '\\n No restriction\\n '\n return True\n<|end_body_2|>\n", "revision_id": "0267168bb90e8e9c85aecdd715972b9622b82384", "skeleton": "<|skeleton|>\nclass Start:\n \"\"\"User task that starts flow from a django view.\"\"\"\n\n def Available(self, owner=None, **owner_kwargs):\n \"\"\"Make process start action available for the User. Accepts user lookup kwargs or callable predicate :: User -> bool:: .Available(username='employee') .Available(lambda user: user.is_super_user)\"\"\"\n <|body_0|>\n\n def get_task_url(self, task, url_type='guess', namespace='', **kwargs):\n \"\"\"\"Handle url_Type='execute'. If url_type is 'guess' and task can be executed by user, the 'execute' url is returned.\"\"\"\n <|body_1|>\n\n def can_execute(self, user, task=None):\n \"\"\"Check user permission to start a flow.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Start:\n \"\"\"User task that starts flow from a django view.\"\"\"\n\n def Available(self, owner=None, **owner_kwargs):\n \"\"\"Make process start action available for the User. Accepts user lookup kwargs or callable predicate :: User -> bool:: .Available(username='employee') .Available(lambda user: user.is_super_user)\"\"\"\n if owner:\n self._owner = owner\n else:\n self._owner = owner_kwargs\n return self\n\n def get_task_url(self, task, url_type='guess', namespace='', **kwargs):\n \"\"\"\"Handle url_Type='execute'. If url_type is 'guess' and task can be executed by user, the 'execute' url is returned.\"\"\"\n if url_type in ['execute', 'guess']:\n if 'user' in kwargs and self.can_execute(kwargs['user'], task):\n url_name = '{}:{}'.format(namespace, self.name)\n return reverse(url_name)\n return super(Start, self).get_task_url(task, url_type=url_type, namespace=namespace, **kwargs)\n\n def can_execute(self, user, task=None):\n \"\"\"Check user permission to start a flow.\"\"\"\n if task and task.status != STATUS.NEW:\n return False\n from django.contrib.auth import get_user_model\n if self._owner:\n if callable(self._owner):\n return self._owner(user)\n else:\n owner = get_user_model()._default_manager.get(**self._owner)\n return owner == user\n elif self._owner_permission:\n obj = None\n if self._owner_permission_obj:\n if callable(self._owner_permission_obj):\n obj = self._owner_permission_obj()\n else:\n obj = self._owner_permission_obj\n return user.has_perm(self._owner_permission, obj=obj)\n else:\n '\\n No restriction\\n '\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "Scripts/ict/viewflow/nodes/view.py", "source_repo": "mspgeek/Client_Portal", "split": "test", "star_events_count": 6} {"blob_id": "08a3ce112185fb41cd867609aec96e31002b6355", "bodies": ["super().__init__(*args, **kwargs)\ndb = model.SqlSession()\nurls, domains = get_start_urls(db)\nrandom.shuffle(urls)\nself.start_urls = urls\nself.extractor = LinkExtractor(allow_domains=domains, deny_domains=DENY_DOMAINS)\ndb.close()", "for link in self.extractor.extract_links(response):\n yield scrapy.Request(url=link.url)\nyield from items.load_emails(response)"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, **kwargs)\n db = model.SqlSession()\n urls, domains = get_start_urls(db)\n random.shuffle(urls)\n self.start_urls = urls\n self.extractor = LinkExtractor(allow_domains=domains, deny_domains=DENY_DOMAINS)\n db.close()\n<|end_body_0|>\n\n<|body_start_1|>\n for link in self.extractor.extract_links(response):\n yield scrapy.Request(url=link.url)\n yield from items.load_emails(response)\n<|end_body_1|>\n", "class_docstring": "searches our developer domains for emails", "class_name": "EmailSpider", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EmailSpider:\n \"\"\"searches our developer domains for emails\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"conencts to db\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"finds emails in a rudimentary but effective way using regular expressions\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n db = model.SqlSession()\n urls, domains = get_start_urls(db)\n random.shuffle(urls)\n self.start_urls = urls\n self.extractor = LinkExtractor(allow_domains=domains, deny_domains=DENY_DOMAINS)\n db.close()\n<|end_body_0|>\n\n<|body_start_1|>\n for link in self.extractor.extract_links(response):\n yield scrapy.Request(url=link.url)\n yield from items.load_emails(response)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000156", "length_bytes": 1536, "license_type": "permissive", "methods": [{"docstring": "conencts to db", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "finds emails in a rudimentary but effective way using regular expressions", "name": "parse", "signature": "def parse(self, response)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_043719", "prompt": "Implement the Python class `EmailSpider` described below.\n\nClass description:\nsearches our developer domains for emails\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): conencts to db\n- def parse(self, response): finds emails in a rudimentary but effective way using regular expressions", "prompted_full_text": "Implement the Python class `EmailSpider` described below.\n\nClass description:\nsearches our developer domains for emails\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): conencts to db\n- def parse(self, response): finds emails in a rudimentary but effective way using regular expressions\n\n<|skeleton|>\nclass EmailSpider:\n \"\"\"searches our developer domains for emails\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"conencts to db\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"finds emails in a rudimentary but effective way using regular expressions\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n db = model.SqlSession()\n urls, domains = get_start_urls(db)\n random.shuffle(urls)\n self.start_urls = urls\n self.extractor = LinkExtractor(allow_domains=domains, deny_domains=DENY_DOMAINS)\n db.close()\n<|end_body_0|>\n\n<|body_start_1|>\n for link in self.extractor.extract_links(response):\n yield scrapy.Request(url=link.url)\n yield from items.load_emails(response)\n<|end_body_1|>\n", "revision_id": "c0c38c7b02f41f482b01f145b0348ecbb82952a9", "skeleton": "<|skeleton|>\nclass EmailSpider:\n \"\"\"searches our developer domains for emails\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"conencts to db\"\"\"\n <|body_0|>\n\n def parse(self, response):\n \"\"\"finds emails in a rudimentary but effective way using regular expressions\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EmailSpider:\n \"\"\"searches our developer domains for emails\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"conencts to db\"\"\"\n super().__init__(*args, **kwargs)\n db = model.SqlSession()\n urls, domains = get_start_urls(db)\n random.shuffle(urls)\n self.start_urls = urls\n self.extractor = LinkExtractor(allow_domains=domains, deny_domains=DENY_DOMAINS)\n db.close()\n\n def parse(self, response):\n \"\"\"finds emails in a rudimentary but effective way using regular expressions\"\"\"\n for link in self.extractor.extract_links(response):\n yield scrapy.Request(url=link.url)\n yield from items.load_emails(response)\n", "source": "the_stack_v2_python_sparse", "source_path": "steam/spiders/email.py", "source_repo": "underscorenygren/slick", "split": "test", "star_events_count": 1} {"blob_id": "8669e8332a3a93ff5e04eb7d43fb7e2516a21c18", "bodies": ["def generator():\n primer = (yield)\n yield primer\nprime_true = chainlet.genlink.GeneratorLink(generator(), prime=True)\nself.assertEqual(prime_true.send('pingpong'), 'pingpong')\nprime_false = chainlet.genlink.GeneratorLink(generator(), prime=False)\nself.assertIsNone(next(prime_false))\nself.assertEqual(prime_false.send('pingpong'), 'pingpong')\nfor name, link in (('prime_true', prime_true), ('prime_false', prime_false)):\n with self.subTest(name=name):\n with self.assertRaises(StopIteration):\n next(link)", "@chainlet.genlet\ndef pingpong():\n last = (yield)\n while True:\n last = (yield last)\nwith self.subTest(case='close'):\n genlet = pingpong()\n genlet.close()\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\nwith self.subTest(case='throw'):\n genlet = pingpong()\n with self.assertRaises(GeneratorExit):\n genlet.throw(GeneratorExit)\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)", "@chainlet.genlet\ndef pingpong():\n last = (yield)\n while True:\n last = (yield last)\ntest_values = [0, 22, -22, 1000000.0, 'foobar'] + [random.random() for _ in range(20)]\nwith self.subTest(case='generator interface'):\n genlet = pingpong()\n for value in test_values:\n self.assertEqual(genlet.send(value), value)\n self.assertIsNone(next(genlet))\n self.assertEqual(genlet.send(value), value)\n self.assertEqual(genlet.send(value), genlet.slave.send(value))\nwith self.subTest(case='chain element'):\n chain = NamedChainlet('start') >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\nwith self.subTest(case='fill chain'):\n chain = NamedChainlet('start') >> pingpong() >> pingpong() >> pingpong() >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)", "for prime in (True, False):\n with self.subTest(prime=prime):\n\n @chainlet.genlet(prime)\n def prime_arg():\n primer = (yield)\n yield primer\n\n @chainlet.genlet(prime=prime)\n def prime_kwarg():\n primer = (yield)\n yield primer\n for genlet in (prime_arg, prime_kwarg):\n link = genlet()\n if not prime:\n self.assertIsNone(next(link))\n self.assertEqual(link.send('pingpong'), 'pingpong')\n with self.assertRaises(StopIteration):\n next(link)"], "bodies_text": "<|body_start_0|>\n def generator():\n primer = (yield)\n yield primer\n prime_true = chainlet.genlink.GeneratorLink(generator(), prime=True)\n self.assertEqual(prime_true.send('pingpong'), 'pingpong')\n prime_false = chainlet.genlink.GeneratorLink(generator(), prime=False)\n self.assertIsNone(next(prime_false))\n self.assertEqual(prime_false.send('pingpong'), 'pingpong')\n for name, link in (('prime_true', prime_true), ('prime_false', prime_false)):\n with self.subTest(name=name):\n with self.assertRaises(StopIteration):\n next(link)\n<|end_body_0|>\n\n<|body_start_1|>\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n with self.subTest(case='close'):\n genlet = pingpong()\n genlet.close()\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n with self.subTest(case='throw'):\n genlet = pingpong()\n with self.assertRaises(GeneratorExit):\n genlet.throw(GeneratorExit)\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n<|end_body_1|>\n\n<|body_start_2|>\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n test_values = [0, 22, -22, 1000000.0, 'foobar'] + [random.random() for _ in range(20)]\n with self.subTest(case='generator interface'):\n genlet = pingpong()\n for value in test_values:\n self.assertEqual(genlet.send(value), value)\n self.assertIsNone(next(genlet))\n self.assertEqual(genlet.send(value), value)\n self.assertEqual(genlet.send(value), genlet.slave.send(value))\n with self.subTest(case='chain element'):\n chain = NamedChainlet('start') >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n with self.subTest(case='fill chain'):\n chain = NamedChainlet('start') >> pingpong() >> pingpong() >> pingpong() >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n<|end_body_2|>\n\n<|body_start_3|>\n for prime in (True, False):\n with self.subTest(prime=prime):\n\n @chainlet.genlet(prime)\n def prime_arg():\n primer = (yield)\n yield primer\n\n @chainlet.genlet(prime=prime)\n def prime_kwarg():\n primer = (yield)\n yield primer\n for genlet in (prime_arg, prime_kwarg):\n link = genlet()\n if not prime:\n self.assertIsNone(next(link))\n self.assertEqual(link.send('pingpong'), 'pingpong')\n with self.assertRaises(StopIteration):\n next(link)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "TestGeneratorLink", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestGeneratorLink:\n\n def test_prime(self):\n \"\"\"Prime generator for use\"\"\"\n <|body_0|>\n\n def test_close(self):\n \"\"\"Release underlying generator\"\"\"\n <|body_1|>\n\n def test_linklet(self):\n \"\"\"Chainlink via decorator\"\"\"\n <|body_2|>\n\n def test_prime_linklet(self):\n \"\"\"Prime genlet for use\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def generator():\n primer = (yield)\n yield primer\n prime_true = chainlet.genlink.GeneratorLink(generator(), prime=True)\n self.assertEqual(prime_true.send('pingpong'), 'pingpong')\n prime_false = chainlet.genlink.GeneratorLink(generator(), prime=False)\n self.assertIsNone(next(prime_false))\n self.assertEqual(prime_false.send('pingpong'), 'pingpong')\n for name, link in (('prime_true', prime_true), ('prime_false', prime_false)):\n with self.subTest(name=name):\n with self.assertRaises(StopIteration):\n next(link)\n<|end_body_0|>\n\n<|body_start_1|>\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n with self.subTest(case='close'):\n genlet = pingpong()\n genlet.close()\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n with self.subTest(case='throw'):\n genlet = pingpong()\n with self.assertRaises(GeneratorExit):\n genlet.throw(GeneratorExit)\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n<|end_body_1|>\n\n<|body_start_2|>\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n test_values = [0, 22, -22, 1000000.0, 'foobar'] + [random.random() for _ in range(20)]\n with self.subTest(case='generator interface'):\n genlet = pingpong()\n for value in test_values:\n self.assertEqual(genlet.send(value), value)\n self.assertIsNone(next(genlet))\n self.assertEqual(genlet.send(value), value)\n self.assertEqual(genlet.send(value), genlet.slave.send(value))\n with self.subTest(case='chain element'):\n chain = NamedChainlet('start') >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n with self.subTest(case='fill chain'):\n chain = NamedChainlet('start') >> pingpong() >> pingpong() >> pingpong() >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n<|end_body_2|>\n\n<|body_start_3|>\n for prime in (True, False):\n with self.subTest(prime=prime):\n\n @chainlet.genlet(prime)\n def prime_arg():\n primer = (yield)\n yield primer\n\n @chainlet.genlet(prime=prime)\n def prime_kwarg():\n primer = (yield)\n yield primer\n for genlet in (prime_arg, prime_kwarg):\n link = genlet()\n if not prime:\n self.assertIsNone(next(link))\n self.assertEqual(link.send('pingpong'), 'pingpong')\n with self.assertRaises(StopIteration):\n next(link)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000157", "length_bytes": 6919, "license_type": "permissive", "methods": [{"docstring": "Prime generator for use", "name": "test_prime", "signature": "def test_prime(self)"}, {"docstring": "Release underlying generator", "name": "test_close", "signature": "def test_close(self)"}, {"docstring": "Chainlink via decorator", "name": "test_linklet", "signature": "def test_linklet(self)"}, {"docstring": "Prime genlet for use", "name": "test_prime_linklet", "signature": "def test_prime_linklet(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000464", "prompt": "Implement the Python class `TestGeneratorLink` described below.\n\nClass description:\nImplement the TestGeneratorLink class.\n\nMethod signatures and docstrings:\n- def test_prime(self): Prime generator for use\n- def test_close(self): Release underlying generator\n- def test_linklet(self): Chainlink via decorator\n- def test_prime_linklet(self): Prime genlet for use", "prompted_full_text": "Implement the Python class `TestGeneratorLink` described below.\n\nClass description:\nImplement the TestGeneratorLink class.\n\nMethod signatures and docstrings:\n- def test_prime(self): Prime generator for use\n- def test_close(self): Release underlying generator\n- def test_linklet(self): Chainlink via decorator\n- def test_prime_linklet(self): Prime genlet for use\n\n<|skeleton|>\nclass TestGeneratorLink:\n\n def test_prime(self):\n \"\"\"Prime generator for use\"\"\"\n <|body_0|>\n\n def test_close(self):\n \"\"\"Release underlying generator\"\"\"\n <|body_1|>\n\n def test_linklet(self):\n \"\"\"Chainlink via decorator\"\"\"\n <|body_2|>\n\n def test_prime_linklet(self):\n \"\"\"Prime genlet for use\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def generator():\n primer = (yield)\n yield primer\n prime_true = chainlet.genlink.GeneratorLink(generator(), prime=True)\n self.assertEqual(prime_true.send('pingpong'), 'pingpong')\n prime_false = chainlet.genlink.GeneratorLink(generator(), prime=False)\n self.assertIsNone(next(prime_false))\n self.assertEqual(prime_false.send('pingpong'), 'pingpong')\n for name, link in (('prime_true', prime_true), ('prime_false', prime_false)):\n with self.subTest(name=name):\n with self.assertRaises(StopIteration):\n next(link)\n<|end_body_0|>\n\n<|body_start_1|>\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n with self.subTest(case='close'):\n genlet = pingpong()\n genlet.close()\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n with self.subTest(case='throw'):\n genlet = pingpong()\n with self.assertRaises(GeneratorExit):\n genlet.throw(GeneratorExit)\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n<|end_body_1|>\n\n<|body_start_2|>\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n test_values = [0, 22, -22, 1000000.0, 'foobar'] + [random.random() for _ in range(20)]\n with self.subTest(case='generator interface'):\n genlet = pingpong()\n for value in test_values:\n self.assertEqual(genlet.send(value), value)\n self.assertIsNone(next(genlet))\n self.assertEqual(genlet.send(value), value)\n self.assertEqual(genlet.send(value), genlet.slave.send(value))\n with self.subTest(case='chain element'):\n chain = NamedChainlet('start') >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n with self.subTest(case='fill chain'):\n chain = NamedChainlet('start') >> pingpong() >> pingpong() >> pingpong() >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n<|end_body_2|>\n\n<|body_start_3|>\n for prime in (True, False):\n with self.subTest(prime=prime):\n\n @chainlet.genlet(prime)\n def prime_arg():\n primer = (yield)\n yield primer\n\n @chainlet.genlet(prime=prime)\n def prime_kwarg():\n primer = (yield)\n yield primer\n for genlet in (prime_arg, prime_kwarg):\n link = genlet()\n if not prime:\n self.assertIsNone(next(link))\n self.assertEqual(link.send('pingpong'), 'pingpong')\n with self.assertRaises(StopIteration):\n next(link)\n<|end_body_3|>\n", "revision_id": "4e17f9992b4780bd0d9309202e2847df640bffe8", "skeleton": "<|skeleton|>\nclass TestGeneratorLink:\n\n def test_prime(self):\n \"\"\"Prime generator for use\"\"\"\n <|body_0|>\n\n def test_close(self):\n \"\"\"Release underlying generator\"\"\"\n <|body_1|>\n\n def test_linklet(self):\n \"\"\"Chainlink via decorator\"\"\"\n <|body_2|>\n\n def test_prime_linklet(self):\n \"\"\"Prime genlet for use\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestGeneratorLink:\n def test_prime(self):\n \"\"\"Prime generator for use\"\"\"\n def generator():\n primer = (yield)\n yield primer\n prime_true = chainlet.genlink.GeneratorLink(generator(), prime=True)\n self.assertEqual(prime_true.send('pingpong'), 'pingpong')\n prime_false = chainlet.genlink.GeneratorLink(generator(), prime=False)\n self.assertIsNone(next(prime_false))\n self.assertEqual(prime_false.send('pingpong'), 'pingpong')\n for name, link in (('prime_true', prime_true), ('prime_false', prime_false)):\n with self.subTest(name=name):\n with self.assertRaises(StopIteration):\n next(link)\n\n def test_close(self):\n \"\"\"Release underlying generator\"\"\"\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n with self.subTest(case='close'):\n genlet = pingpong()\n genlet.close()\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n with self.subTest(case='throw'):\n genlet = pingpong()\n with self.assertRaises(GeneratorExit):\n genlet.throw(GeneratorExit)\n with self.assertRaises(StopIteration):\n next(genlet)\n with self.assertRaises(StopIteration):\n next(genlet.slave)\n\n def test_linklet(self):\n \"\"\"Chainlink via decorator\"\"\"\n @chainlet.genlet\n def pingpong():\n last = (yield)\n while True:\n last = (yield last)\n test_values = [0, 22, -22, 1000000.0, 'foobar'] + [random.random() for _ in range(20)]\n with self.subTest(case='generator interface'):\n genlet = pingpong()\n for value in test_values:\n self.assertEqual(genlet.send(value), value)\n self.assertIsNone(next(genlet))\n self.assertEqual(genlet.send(value), value)\n self.assertEqual(genlet.send(value), genlet.slave.send(value))\n with self.subTest(case='chain element'):\n chain = NamedChainlet('start') >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n with self.subTest(case='fill chain'):\n chain = NamedChainlet('start') >> pingpong() >> pingpong() >> pingpong() >> pingpong() >> NamedChainlet('stop')\n for value in test_values:\n self.assertEqual(chain.send(value), value)\n self.assertIsNone(next(chain))\n self.assertEqual(chain.send(value), value)\n\n def test_prime_linklet(self):\n \"\"\"Prime genlet for use\"\"\"\n for prime in (True, False):\n with self.subTest(prime=prime):\n\n @chainlet.genlet(prime)\n def prime_arg():\n primer = (yield)\n yield primer\n\n @chainlet.genlet(prime=prime)\n def prime_kwarg():\n primer = (yield)\n yield primer\n for genlet in (prime_arg, prime_kwarg):\n link = genlet()\n if not prime:\n self.assertIsNone(next(link))\n self.assertEqual(link.send('pingpong'), 'pingpong')\n with self.assertRaises(StopIteration):\n next(link)\n", "source": "the_stack_v2_python_sparse", "source_path": "chainlet_unittests/test_chainlet/test_genlink.py", "source_repo": "maxfischer2781/chainlet", "split": "test", "star_events_count": 1} {"blob_id": "547770d90a9977fc34cfe4108d6ddb35281932e3", "bodies": ["super().__init__()\nself.tagset_size = tagset_size\nself.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size))\nif not init_from_state_dict:\n self.transitions.detach()[tag_dictionary.get_idx_for_item(START_TAG), :] = -10000\n self.transitions.detach()[:, tag_dictionary.get_idx_for_item(STOP_TAG)] = -10000\nself.to(flair.device)", "batch_size, seq_len = features.size()[:2]\nemission_scores = features\nemission_scores = emission_scores.unsqueeze(-1).expand(batch_size, seq_len, self.tagset_size, self.tagset_size)\ncrf_scores = emission_scores + self.transitions.unsqueeze(0).unsqueeze(0)\nreturn crf_scores"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.tagset_size = tagset_size\n self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size))\n if not init_from_state_dict:\n self.transitions.detach()[tag_dictionary.get_idx_for_item(START_TAG), :] = -10000\n self.transitions.detach()[:, tag_dictionary.get_idx_for_item(STOP_TAG)] = -10000\n self.to(flair.device)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, seq_len = features.size()[:2]\n emission_scores = features\n emission_scores = emission_scores.unsqueeze(-1).expand(batch_size, seq_len, self.tagset_size, self.tagset_size)\n crf_scores = emission_scores + self.transitions.unsqueeze(0).unsqueeze(0)\n return crf_scores\n<|end_body_1|>\n", "class_docstring": "Conditional Random Field. Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations.", "class_name": "CRF", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CRF:\n \"\"\"Conditional Random Field. Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations.\"\"\"\n\n def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None:\n \"\"\"Initialize the Conditional Random Field. :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict\"\"\"\n <|body_0|>\n\n def forward(self, features: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.tagset_size = tagset_size\n self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size))\n if not init_from_state_dict:\n self.transitions.detach()[tag_dictionary.get_idx_for_item(START_TAG), :] = -10000\n self.transitions.detach()[:, tag_dictionary.get_idx_for_item(STOP_TAG)] = -10000\n self.to(flair.device)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, seq_len = features.size()[:2]\n emission_scores = features\n emission_scores = emission_scores.unsqueeze(-1).expand(batch_size, seq_len, self.tagset_size, self.tagset_size)\n crf_scores = emission_scores + self.transitions.unsqueeze(0).unsqueeze(0)\n return crf_scores\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000158", "length_bytes": 2171, "license_type": "permissive", "methods": [{"docstring": "Initialize the Conditional Random Field. :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict", "name": "__init__", "signature": "def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None"}, {"docstring": "Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size)", "name": "forward", "signature": "def forward(self, features: torch.Tensor) -> torch.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005778", "prompt": "Implement the Python class `CRF` described below.\n\nClass description:\nConditional Random Field. Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations.\n\nMethod signatures and docstrings:\n- def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None: Initialize the Conditional Random Field. :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict\n- def forward(self, features: torch.Tensor) -> torch.Tensor: Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size)", "prompted_full_text": "Implement the Python class `CRF` described below.\n\nClass description:\nConditional Random Field. Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations.\n\nMethod signatures and docstrings:\n- def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None: Initialize the Conditional Random Field. :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict\n- def forward(self, features: torch.Tensor) -> torch.Tensor: Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size)\n\n<|skeleton|>\nclass CRF:\n \"\"\"Conditional Random Field. Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations.\"\"\"\n\n def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None:\n \"\"\"Initialize the Conditional Random Field. :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict\"\"\"\n <|body_0|>\n\n def forward(self, features: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.tagset_size = tagset_size\n self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size))\n if not init_from_state_dict:\n self.transitions.detach()[tag_dictionary.get_idx_for_item(START_TAG), :] = -10000\n self.transitions.detach()[:, tag_dictionary.get_idx_for_item(STOP_TAG)] = -10000\n self.to(flair.device)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, seq_len = features.size()[:2]\n emission_scores = features\n emission_scores = emission_scores.unsqueeze(-1).expand(batch_size, seq_len, self.tagset_size, self.tagset_size)\n crf_scores = emission_scores + self.transitions.unsqueeze(0).unsqueeze(0)\n return crf_scores\n<|end_body_1|>\n", "revision_id": "1795ac80da18efadcd56b46374a40190abca07e4", "skeleton": "<|skeleton|>\nclass CRF:\n \"\"\"Conditional Random Field. Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations.\"\"\"\n\n def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None:\n \"\"\"Initialize the Conditional Random Field. :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict\"\"\"\n <|body_0|>\n\n def forward(self, features: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CRF:\n \"\"\"Conditional Random Field. Conditional Random Field Implementation according to sgrvinod (https://github.com/sgrvinod). Classifier which predicts single tag / class / label for given word based on not just the word, but also on previous seen annotations.\"\"\"\n\n def __init__(self, tag_dictionary, tagset_size: int, init_from_state_dict: bool) -> None:\n \"\"\"Initialize the Conditional Random Field. :param tag_dictionary: tag dictionary in order to find ID for start and stop tags :param tagset_size: number of tag from tag dictionary :param init_from_state_dict: whether we load pretrained model from state dict\"\"\"\n super().__init__()\n self.tagset_size = tagset_size\n self.transitions = torch.nn.Parameter(torch.randn(tagset_size, tagset_size))\n if not init_from_state_dict:\n self.transitions.detach()[tag_dictionary.get_idx_for_item(START_TAG), :] = -10000\n self.transitions.detach()[:, tag_dictionary.get_idx_for_item(STOP_TAG)] = -10000\n self.to(flair.device)\n\n def forward(self, features: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward propagation of Conditional Random Field. :param features: output from RNN / Linear layer in shape (batch size, seq len, hidden size) :return: CRF scores (emission scores for each token + transitions prob from previous state) in shape (batch_size, seq len, tagset size, tagset size)\"\"\"\n batch_size, seq_len = features.size()[:2]\n emission_scores = features\n emission_scores = emission_scores.unsqueeze(-1).expand(batch_size, seq_len, self.tagset_size, self.tagset_size)\n crf_scores = emission_scores + self.transitions.unsqueeze(0).unsqueeze(0)\n return crf_scores\n", "source": "the_stack_v2_python_sparse", "source_path": "flair/models/sequence_tagger_utils/crf.py", "source_repo": "flairNLP/flair", "split": "test", "star_events_count": 5684} {"blob_id": "c979fb8409696f1d1e25d2716c8f6b0404e5ad1f", "bodies": ["from .wrapper import NCNNWrapper\nif deploy_cfg:\n backend_config = get_backend_config(deploy_cfg)\n use_vulkan = backend_config.get('use_vulkan', False)\nelse:\n use_vulkan = False\nreturn NCNNWrapper(param_file=backend_files[0], bin_file=backend_files[1], output_names=output_names, use_vulkan=use_vulkan)", "import importlib\nfrom .init_plugins import get_onnx2ncnn_path, get_ops_path\nhas_pyncnn = importlib.util.find_spec('ncnn') is not None\nonnx2ncnn = get_onnx2ncnn_path()\nret = has_pyncnn and onnx2ncnn is not None\nif ret and with_custom_ops:\n has_pyncnn_ext = importlib.util.find_spec('mmdeploy.backend.ncnn.ncnn_ext') is not None\n op_path = get_ops_path()\n custom_ops_exist = osp.exists(op_path)\n ret = ret and has_pyncnn_ext and custom_ops_exist\nreturn ret", "if not cls.is_available():\n return 'None'\nelse:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'", "info = super().check_env(log_callback=log_callback)\navailable = cls.is_available()\nops_available = cls.is_available(with_custom_ops=True)\nops_available = 'Available' if ops_available else 'NotAvailable'\nif available:\n ops_info = f'ncnn custom ops:\\t{ops_available}'\n log_callback(ops_info)\n info = f'{info}\\n{ops_info}'\nreturn info", "logger = get_root_logger()\nfrom . import is_available\nif not is_available():\n logger.error('ncnn support is not available, please make sure:\\n1) `mmdeploy_onnx2ncnn` existed in `PATH`\\n2) python import ncnn success')\n sys.exit(1)\nfrom mmdeploy.apis.ncnn import get_output_model_file\nfrom .onnx2ncnn import from_onnx\nbackend_files = []\nfor onnx_path in ir_files:\n model_param_path, model_bin_path = get_output_model_file(onnx_path, work_dir)\n onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]\n from_onnx(onnx_path, osp.join(work_dir, onnx_name))\n backend_files += [model_param_path, model_bin_path]\nreturn backend_files"], "bodies_text": "<|body_start_0|>\n from .wrapper import NCNNWrapper\n if deploy_cfg:\n backend_config = get_backend_config(deploy_cfg)\n use_vulkan = backend_config.get('use_vulkan', False)\n else:\n use_vulkan = False\n return NCNNWrapper(param_file=backend_files[0], bin_file=backend_files[1], output_names=output_names, use_vulkan=use_vulkan)\n<|end_body_0|>\n\n<|body_start_1|>\n import importlib\n from .init_plugins import get_onnx2ncnn_path, get_ops_path\n has_pyncnn = importlib.util.find_spec('ncnn') is not None\n onnx2ncnn = get_onnx2ncnn_path()\n ret = has_pyncnn and onnx2ncnn is not None\n if ret and with_custom_ops:\n has_pyncnn_ext = importlib.util.find_spec('mmdeploy.backend.ncnn.ncnn_ext') is not None\n op_path = get_ops_path()\n custom_ops_exist = osp.exists(op_path)\n ret = ret and has_pyncnn_ext and custom_ops_exist\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'\n<|end_body_2|>\n\n<|body_start_3|>\n info = super().check_env(log_callback=log_callback)\n available = cls.is_available()\n ops_available = cls.is_available(with_custom_ops=True)\n ops_available = 'Available' if ops_available else 'NotAvailable'\n if available:\n ops_info = f'ncnn custom ops:\\t{ops_available}'\n log_callback(ops_info)\n info = f'{info}\\n{ops_info}'\n return info\n<|end_body_3|>\n\n<|body_start_4|>\n logger = get_root_logger()\n from . import is_available\n if not is_available():\n logger.error('ncnn support is not available, please make sure:\\n1) `mmdeploy_onnx2ncnn` existed in `PATH`\\n2) python import ncnn success')\n sys.exit(1)\n from mmdeploy.apis.ncnn import get_output_model_file\n from .onnx2ncnn import from_onnx\n backend_files = []\n for onnx_path in ir_files:\n model_param_path, model_bin_path = get_output_model_file(onnx_path, work_dir)\n onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]\n from_onnx(onnx_path, osp.join(work_dir, onnx_name))\n backend_files += [model_param_path, model_bin_path]\n return backend_files\n<|end_body_4|>\n", "class_docstring": "", "class_name": "NCNNManager", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NCNNManager:\n\n def build_wrapper(cls, backend_files: Sequence[str], device: str='cpu', input_names: Optional[Sequence[str]]=None, output_names: Optional[Sequence[str]]=None, deploy_cfg: Optional[Any]=None, **kwargs):\n \"\"\"Build the wrapper for the backend model. Args: backend_files (Sequence[str]): Backend files. device (str, optional): The device info. Defaults to 'cpu'. input_names (Optional[Sequence[str]], optional): input names. Defaults to None. output_names (Optional[Sequence[str]], optional): output names. Defaults to None. deploy_cfg (Optional[Any], optional): The deploy config. Defaults to None.\"\"\"\n <|body_0|>\n\n def is_available(cls, with_custom_ops: bool=False) -> bool:\n \"\"\"Check whether backend is installed. Args: with_custom_ops (bool): check custom ops exists. Returns: bool: True if backend package is installed.\"\"\"\n <|body_1|>\n\n def get_version(cls) -> str:\n \"\"\"Get the version of the backend.\"\"\"\n <|body_2|>\n\n def check_env(cls, log_callback: Callable=lambda _: _) -> str:\n \"\"\"Check current environment. Returns: str: Info about the environment.\"\"\"\n <|body_3|>\n\n def to_backend(cls, ir_files: Sequence[str], work_dir: str, log_level: int=logging.INFO, device: str='cpu', **kwargs) -> Sequence[str]:\n \"\"\"Convert intermediate representation to given backend. Args: ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be saved in this directory. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from .wrapper import NCNNWrapper\n if deploy_cfg:\n backend_config = get_backend_config(deploy_cfg)\n use_vulkan = backend_config.get('use_vulkan', False)\n else:\n use_vulkan = False\n return NCNNWrapper(param_file=backend_files[0], bin_file=backend_files[1], output_names=output_names, use_vulkan=use_vulkan)\n<|end_body_0|>\n\n<|body_start_1|>\n import importlib\n from .init_plugins import get_onnx2ncnn_path, get_ops_path\n has_pyncnn = importlib.util.find_spec('ncnn') is not None\n onnx2ncnn = get_onnx2ncnn_path()\n ret = has_pyncnn and onnx2ncnn is not None\n if ret and with_custom_ops:\n has_pyncnn_ext = importlib.util.find_spec('mmdeploy.backend.ncnn.ncnn_ext') is not None\n op_path = get_ops_path()\n custom_ops_exist = osp.exists(op_path)\n ret = ret and has_pyncnn_ext and custom_ops_exist\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'\n<|end_body_2|>\n\n<|body_start_3|>\n info = super().check_env(log_callback=log_callback)\n available = cls.is_available()\n ops_available = cls.is_available(with_custom_ops=True)\n ops_available = 'Available' if ops_available else 'NotAvailable'\n if available:\n ops_info = f'ncnn custom ops:\\t{ops_available}'\n log_callback(ops_info)\n info = f'{info}\\n{ops_info}'\n return info\n<|end_body_3|>\n\n<|body_start_4|>\n logger = get_root_logger()\n from . import is_available\n if not is_available():\n logger.error('ncnn support is not available, please make sure:\\n1) `mmdeploy_onnx2ncnn` existed in `PATH`\\n2) python import ncnn success')\n sys.exit(1)\n from mmdeploy.apis.ncnn import get_output_model_file\n from .onnx2ncnn import from_onnx\n backend_files = []\n for onnx_path in ir_files:\n model_param_path, model_bin_path = get_output_model_file(onnx_path, work_dir)\n onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]\n from_onnx(onnx_path, osp.join(work_dir, onnx_name))\n backend_files += [model_param_path, model_bin_path]\n return backend_files\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000159", "length_bytes": 5173, "license_type": "permissive", "methods": [{"docstring": "Build the wrapper for the backend model. Args: backend_files (Sequence[str]): Backend files. device (str, optional): The device info. Defaults to 'cpu'. input_names (Optional[Sequence[str]], optional): input names. Defaults to None. output_names (Optional[Sequence[str]], optional): output names. Defaults to None. deploy_cfg (Optional[Any], optional): The deploy config. Defaults to None.", "name": "build_wrapper", "signature": "def build_wrapper(cls, backend_files: Sequence[str], device: str='cpu', input_names: Optional[Sequence[str]]=None, output_names: Optional[Sequence[str]]=None, deploy_cfg: Optional[Any]=None, **kwargs)"}, {"docstring": "Check whether backend is installed. Args: with_custom_ops (bool): check custom ops exists. Returns: bool: True if backend package is installed.", "name": "is_available", "signature": "def is_available(cls, with_custom_ops: bool=False) -> bool"}, {"docstring": "Get the version of the backend.", "name": "get_version", "signature": "def get_version(cls) -> str"}, {"docstring": "Check current environment. Returns: str: Info about the environment.", "name": "check_env", "signature": "def check_env(cls, log_callback: Callable=lambda _: _) -> str"}, {"docstring": "Convert intermediate representation to given backend. Args: ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be saved in this directory. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.", "name": "to_backend", "signature": "def to_backend(cls, ir_files: Sequence[str], work_dir: str, log_level: int=logging.INFO, device: str='cpu', **kwargs) -> Sequence[str]"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_036265", "prompt": "Implement the Python class `NCNNManager` described below.\n\nClass description:\nImplement the NCNNManager class.\n\nMethod signatures and docstrings:\n- def build_wrapper(cls, backend_files: Sequence[str], device: str='cpu', input_names: Optional[Sequence[str]]=None, output_names: Optional[Sequence[str]]=None, deploy_cfg: Optional[Any]=None, **kwargs): Build the wrapper for the backend model. Args: backend_files (Sequence[str]): Backend files. device (str, optional): The device info. Defaults to 'cpu'. input_names (Optional[Sequence[str]], optional): input names. Defaults to None. output_names (Optional[Sequence[str]], optional): output names. Defaults to None. deploy_cfg (Optional[Any], optional): The deploy config. Defaults to None.\n- def is_available(cls, with_custom_ops: bool=False) -> bool: Check whether backend is installed. Args: with_custom_ops (bool): check custom ops exists. Returns: bool: True if backend package is installed.\n- def get_version(cls) -> str: Get the version of the backend.\n- def check_env(cls, log_callback: Callable=lambda _: _) -> str: Check current environment. Returns: str: Info about the environment.\n- def to_backend(cls, ir_files: Sequence[str], work_dir: str, log_level: int=logging.INFO, device: str='cpu', **kwargs) -> Sequence[str]: Convert intermediate representation to given backend. Args: ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be saved in this directory. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.", "prompted_full_text": "Implement the Python class `NCNNManager` described below.\n\nClass description:\nImplement the NCNNManager class.\n\nMethod signatures and docstrings:\n- def build_wrapper(cls, backend_files: Sequence[str], device: str='cpu', input_names: Optional[Sequence[str]]=None, output_names: Optional[Sequence[str]]=None, deploy_cfg: Optional[Any]=None, **kwargs): Build the wrapper for the backend model. Args: backend_files (Sequence[str]): Backend files. device (str, optional): The device info. Defaults to 'cpu'. input_names (Optional[Sequence[str]], optional): input names. Defaults to None. output_names (Optional[Sequence[str]], optional): output names. Defaults to None. deploy_cfg (Optional[Any], optional): The deploy config. Defaults to None.\n- def is_available(cls, with_custom_ops: bool=False) -> bool: Check whether backend is installed. Args: with_custom_ops (bool): check custom ops exists. Returns: bool: True if backend package is installed.\n- def get_version(cls) -> str: Get the version of the backend.\n- def check_env(cls, log_callback: Callable=lambda _: _) -> str: Check current environment. Returns: str: Info about the environment.\n- def to_backend(cls, ir_files: Sequence[str], work_dir: str, log_level: int=logging.INFO, device: str='cpu', **kwargs) -> Sequence[str]: Convert intermediate representation to given backend. Args: ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be saved in this directory. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.\n\n<|skeleton|>\nclass NCNNManager:\n\n def build_wrapper(cls, backend_files: Sequence[str], device: str='cpu', input_names: Optional[Sequence[str]]=None, output_names: Optional[Sequence[str]]=None, deploy_cfg: Optional[Any]=None, **kwargs):\n \"\"\"Build the wrapper for the backend model. Args: backend_files (Sequence[str]): Backend files. device (str, optional): The device info. Defaults to 'cpu'. input_names (Optional[Sequence[str]], optional): input names. Defaults to None. output_names (Optional[Sequence[str]], optional): output names. Defaults to None. deploy_cfg (Optional[Any], optional): The deploy config. Defaults to None.\"\"\"\n <|body_0|>\n\n def is_available(cls, with_custom_ops: bool=False) -> bool:\n \"\"\"Check whether backend is installed. Args: with_custom_ops (bool): check custom ops exists. Returns: bool: True if backend package is installed.\"\"\"\n <|body_1|>\n\n def get_version(cls) -> str:\n \"\"\"Get the version of the backend.\"\"\"\n <|body_2|>\n\n def check_env(cls, log_callback: Callable=lambda _: _) -> str:\n \"\"\"Check current environment. Returns: str: Info about the environment.\"\"\"\n <|body_3|>\n\n def to_backend(cls, ir_files: Sequence[str], work_dir: str, log_level: int=logging.INFO, device: str='cpu', **kwargs) -> Sequence[str]:\n \"\"\"Convert intermediate representation to given backend. Args: ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be saved in this directory. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from .wrapper import NCNNWrapper\n if deploy_cfg:\n backend_config = get_backend_config(deploy_cfg)\n use_vulkan = backend_config.get('use_vulkan', False)\n else:\n use_vulkan = False\n return NCNNWrapper(param_file=backend_files[0], bin_file=backend_files[1], output_names=output_names, use_vulkan=use_vulkan)\n<|end_body_0|>\n\n<|body_start_1|>\n import importlib\n from .init_plugins import get_onnx2ncnn_path, get_ops_path\n has_pyncnn = importlib.util.find_spec('ncnn') is not None\n onnx2ncnn = get_onnx2ncnn_path()\n ret = has_pyncnn and onnx2ncnn is not None\n if ret and with_custom_ops:\n has_pyncnn_ext = importlib.util.find_spec('mmdeploy.backend.ncnn.ncnn_ext') is not None\n op_path = get_ops_path()\n custom_ops_exist = osp.exists(op_path)\n ret = ret and has_pyncnn_ext and custom_ops_exist\n return ret\n<|end_body_1|>\n\n<|body_start_2|>\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'\n<|end_body_2|>\n\n<|body_start_3|>\n info = super().check_env(log_callback=log_callback)\n available = cls.is_available()\n ops_available = cls.is_available(with_custom_ops=True)\n ops_available = 'Available' if ops_available else 'NotAvailable'\n if available:\n ops_info = f'ncnn custom ops:\\t{ops_available}'\n log_callback(ops_info)\n info = f'{info}\\n{ops_info}'\n return info\n<|end_body_3|>\n\n<|body_start_4|>\n logger = get_root_logger()\n from . import is_available\n if not is_available():\n logger.error('ncnn support is not available, please make sure:\\n1) `mmdeploy_onnx2ncnn` existed in `PATH`\\n2) python import ncnn success')\n sys.exit(1)\n from mmdeploy.apis.ncnn import get_output_model_file\n from .onnx2ncnn import from_onnx\n backend_files = []\n for onnx_path in ir_files:\n model_param_path, model_bin_path = get_output_model_file(onnx_path, work_dir)\n onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]\n from_onnx(onnx_path, osp.join(work_dir, onnx_name))\n backend_files += [model_param_path, model_bin_path]\n return backend_files\n<|end_body_4|>\n", "revision_id": "5479c8774f5b88d7ed9d399d4e305cb42cc2e73a", "skeleton": "<|skeleton|>\nclass NCNNManager:\n\n def build_wrapper(cls, backend_files: Sequence[str], device: str='cpu', input_names: Optional[Sequence[str]]=None, output_names: Optional[Sequence[str]]=None, deploy_cfg: Optional[Any]=None, **kwargs):\n \"\"\"Build the wrapper for the backend model. Args: backend_files (Sequence[str]): Backend files. device (str, optional): The device info. Defaults to 'cpu'. input_names (Optional[Sequence[str]], optional): input names. Defaults to None. output_names (Optional[Sequence[str]], optional): output names. Defaults to None. deploy_cfg (Optional[Any], optional): The deploy config. Defaults to None.\"\"\"\n <|body_0|>\n\n def is_available(cls, with_custom_ops: bool=False) -> bool:\n \"\"\"Check whether backend is installed. Args: with_custom_ops (bool): check custom ops exists. Returns: bool: True if backend package is installed.\"\"\"\n <|body_1|>\n\n def get_version(cls) -> str:\n \"\"\"Get the version of the backend.\"\"\"\n <|body_2|>\n\n def check_env(cls, log_callback: Callable=lambda _: _) -> str:\n \"\"\"Check current environment. Returns: str: Info about the environment.\"\"\"\n <|body_3|>\n\n def to_backend(cls, ir_files: Sequence[str], work_dir: str, log_level: int=logging.INFO, device: str='cpu', **kwargs) -> Sequence[str]:\n \"\"\"Convert intermediate representation to given backend. Args: ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be saved in this directory. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NCNNManager:\n def build_wrapper(cls, backend_files: Sequence[str], device: str='cpu', input_names: Optional[Sequence[str]]=None, output_names: Optional[Sequence[str]]=None, deploy_cfg: Optional[Any]=None, **kwargs):\n \"\"\"Build the wrapper for the backend model. Args: backend_files (Sequence[str]): Backend files. device (str, optional): The device info. Defaults to 'cpu'. input_names (Optional[Sequence[str]], optional): input names. Defaults to None. output_names (Optional[Sequence[str]], optional): output names. Defaults to None. deploy_cfg (Optional[Any], optional): The deploy config. Defaults to None.\"\"\"\n from .wrapper import NCNNWrapper\n if deploy_cfg:\n backend_config = get_backend_config(deploy_cfg)\n use_vulkan = backend_config.get('use_vulkan', False)\n else:\n use_vulkan = False\n return NCNNWrapper(param_file=backend_files[0], bin_file=backend_files[1], output_names=output_names, use_vulkan=use_vulkan)\n\n def is_available(cls, with_custom_ops: bool=False) -> bool:\n \"\"\"Check whether backend is installed. Args: with_custom_ops (bool): check custom ops exists. Returns: bool: True if backend package is installed.\"\"\"\n import importlib\n from .init_plugins import get_onnx2ncnn_path, get_ops_path\n has_pyncnn = importlib.util.find_spec('ncnn') is not None\n onnx2ncnn = get_onnx2ncnn_path()\n ret = has_pyncnn and onnx2ncnn is not None\n if ret and with_custom_ops:\n has_pyncnn_ext = importlib.util.find_spec('mmdeploy.backend.ncnn.ncnn_ext') is not None\n op_path = get_ops_path()\n custom_ops_exist = osp.exists(op_path)\n ret = ret and has_pyncnn_ext and custom_ops_exist\n return ret\n\n def get_version(cls) -> str:\n \"\"\"Get the version of the backend.\"\"\"\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'\n\n def check_env(cls, log_callback: Callable=lambda _: _) -> str:\n \"\"\"Check current environment. Returns: str: Info about the environment.\"\"\"\n info = super().check_env(log_callback=log_callback)\n available = cls.is_available()\n ops_available = cls.is_available(with_custom_ops=True)\n ops_available = 'Available' if ops_available else 'NotAvailable'\n if available:\n ops_info = f'ncnn custom ops:\\t{ops_available}'\n log_callback(ops_info)\n info = f'{info}\\n{ops_info}'\n return info\n\n def to_backend(cls, ir_files: Sequence[str], work_dir: str, log_level: int=logging.INFO, device: str='cpu', **kwargs) -> Sequence[str]:\n \"\"\"Convert intermediate representation to given backend. Args: ir_files (Sequence[str]): The intermediate representation files. work_dir (str): The work directory, backend files and logs should be saved in this directory. log_level (int, optional): The log level. Defaults to logging.INFO. device (str, optional): The device type. Defaults to 'cpu'. Returns: Sequence[str]: Backend files.\"\"\"\n logger = get_root_logger()\n from . import is_available\n if not is_available():\n logger.error('ncnn support is not available, please make sure:\\n1) `mmdeploy_onnx2ncnn` existed in `PATH`\\n2) python import ncnn success')\n sys.exit(1)\n from mmdeploy.apis.ncnn import get_output_model_file\n from .onnx2ncnn import from_onnx\n backend_files = []\n for onnx_path in ir_files:\n model_param_path, model_bin_path = get_output_model_file(onnx_path, work_dir)\n onnx_name = osp.splitext(osp.split(onnx_path)[1])[0]\n from_onnx(onnx_path, osp.join(work_dir, onnx_name))\n backend_files += [model_param_path, model_bin_path]\n return backend_files\n", "source": "the_stack_v2_python_sparse", "source_path": "mmdeploy/backend/ncnn/backend_manager.py", "source_repo": "open-mmlab/mmdeploy", "split": "test", "star_events_count": 2164} {"blob_id": "6df35a415abb600d742868d6281a4bbe8df0bb26", "bodies": ["case_path = os.path.join(os.getcwd(), caseName)\nif not os.path.exists(case_path):\n os.mkdir(case_path)\ntestcase = unittest.TestSuite()\ndiscover = unittest.defaultTestLoader.discover(case_path, pattern=rule, top_level_dir=None)\nprint(discover)\nfor test_suite in discover:\n for test_case in test_suite:\n print(test_case)\n testcase.addTests(test_case)\nreturn testcase", "now = time.strftime('%Y_%m_%d_%H_%S')\nreport_path = os.path.join(cur_path, reportName)\nif not os.path.exists(report_path):\n os.mkdir(report_path)\nreport_abspath = os.path.join(report_path, now + '17网校直播Android测试报告.html')\nwith open(report_abspath, 'wb') as fp:\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='17网校直播Android测试报告', description='用例执行情况')\n runner.run(allcase)"], "bodies_text": "<|body_start_0|>\n case_path = os.path.join(os.getcwd(), caseName)\n if not os.path.exists(case_path):\n os.mkdir(case_path)\n testcase = unittest.TestSuite()\n discover = unittest.defaultTestLoader.discover(case_path, pattern=rule, top_level_dir=None)\n print(discover)\n for test_suite in discover:\n for test_case in test_suite:\n print(test_case)\n testcase.addTests(test_case)\n return testcase\n<|end_body_0|>\n\n<|body_start_1|>\n now = time.strftime('%Y_%m_%d_%H_%S')\n report_path = os.path.join(cur_path, reportName)\n if not os.path.exists(report_path):\n os.mkdir(report_path)\n report_abspath = os.path.join(report_path, now + '17网校直播Android测试报告.html')\n with open(report_abspath, 'wb') as fp:\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='17网校直播Android测试报告', description='用例执行情况')\n runner.run(allcase)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "mainTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass mainTest:\n\n def add_case(caseName='testCase', rule='case*.py'):\n \"\"\"作用:加载所有测试用例 :param caseName: :param rule: :return:\"\"\"\n <|body_0|>\n\n def run_case(self, allcase, reportName='reports'):\n \"\"\"作用:执行所有的用例,并把执行结果写入HTML测试报告中 :param all_case: :param reportName: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n case_path = os.path.join(os.getcwd(), caseName)\n if not os.path.exists(case_path):\n os.mkdir(case_path)\n testcase = unittest.TestSuite()\n discover = unittest.defaultTestLoader.discover(case_path, pattern=rule, top_level_dir=None)\n print(discover)\n for test_suite in discover:\n for test_case in test_suite:\n print(test_case)\n testcase.addTests(test_case)\n return testcase\n<|end_body_0|>\n\n<|body_start_1|>\n now = time.strftime('%Y_%m_%d_%H_%S')\n report_path = os.path.join(cur_path, reportName)\n if not os.path.exists(report_path):\n os.mkdir(report_path)\n report_abspath = os.path.join(report_path, now + '17网校直播Android测试报告.html')\n with open(report_abspath, 'wb') as fp:\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='17网校直播Android测试报告', description='用例执行情况')\n runner.run(allcase)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000160", "length_bytes": 1964, "license_type": "no_license", "methods": [{"docstring": "作用:加载所有测试用例 :param caseName: :param rule: :return:", "name": "add_case", "signature": "def add_case(caseName='testCase', rule='case*.py')"}, {"docstring": "作用:执行所有的用例,并把执行结果写入HTML测试报告中 :param all_case: :param reportName: :return:", "name": "run_case", "signature": "def run_case(self, allcase, reportName='reports')"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000754", "prompt": "Implement the Python class `mainTest` described below.\n\nClass description:\nImplement the mainTest class.\n\nMethod signatures and docstrings:\n- def add_case(caseName='testCase', rule='case*.py'): 作用:加载所有测试用例 :param caseName: :param rule: :return:\n- def run_case(self, allcase, reportName='reports'): 作用:执行所有的用例,并把执行结果写入HTML测试报告中 :param all_case: :param reportName: :return:", "prompted_full_text": "Implement the Python class `mainTest` described below.\n\nClass description:\nImplement the mainTest class.\n\nMethod signatures and docstrings:\n- def add_case(caseName='testCase', rule='case*.py'): 作用:加载所有测试用例 :param caseName: :param rule: :return:\n- def run_case(self, allcase, reportName='reports'): 作用:执行所有的用例,并把执行结果写入HTML测试报告中 :param all_case: :param reportName: :return:\n\n<|skeleton|>\nclass mainTest:\n\n def add_case(caseName='testCase', rule='case*.py'):\n \"\"\"作用:加载所有测试用例 :param caseName: :param rule: :return:\"\"\"\n <|body_0|>\n\n def run_case(self, allcase, reportName='reports'):\n \"\"\"作用:执行所有的用例,并把执行结果写入HTML测试报告中 :param all_case: :param reportName: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n case_path = os.path.join(os.getcwd(), caseName)\n if not os.path.exists(case_path):\n os.mkdir(case_path)\n testcase = unittest.TestSuite()\n discover = unittest.defaultTestLoader.discover(case_path, pattern=rule, top_level_dir=None)\n print(discover)\n for test_suite in discover:\n for test_case in test_suite:\n print(test_case)\n testcase.addTests(test_case)\n return testcase\n<|end_body_0|>\n\n<|body_start_1|>\n now = time.strftime('%Y_%m_%d_%H_%S')\n report_path = os.path.join(cur_path, reportName)\n if not os.path.exists(report_path):\n os.mkdir(report_path)\n report_abspath = os.path.join(report_path, now + '17网校直播Android测试报告.html')\n with open(report_abspath, 'wb') as fp:\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='17网校直播Android测试报告', description='用例执行情况')\n runner.run(allcase)\n<|end_body_1|>\n", "revision_id": "2a1734b20b042db3c5f4fc02f122f2280f401932", "skeleton": "<|skeleton|>\nclass mainTest:\n\n def add_case(caseName='testCase', rule='case*.py'):\n \"\"\"作用:加载所有测试用例 :param caseName: :param rule: :return:\"\"\"\n <|body_0|>\n\n def run_case(self, allcase, reportName='reports'):\n \"\"\"作用:执行所有的用例,并把执行结果写入HTML测试报告中 :param all_case: :param reportName: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class mainTest:\n def add_case(caseName='testCase', rule='case*.py'):\n \"\"\"作用:加载所有测试用例 :param caseName: :param rule: :return:\"\"\"\n case_path = os.path.join(os.getcwd(), caseName)\n if not os.path.exists(case_path):\n os.mkdir(case_path)\n testcase = unittest.TestSuite()\n discover = unittest.defaultTestLoader.discover(case_path, pattern=rule, top_level_dir=None)\n print(discover)\n for test_suite in discover:\n for test_case in test_suite:\n print(test_case)\n testcase.addTests(test_case)\n return testcase\n\n def run_case(self, allcase, reportName='reports'):\n \"\"\"作用:执行所有的用例,并把执行结果写入HTML测试报告中 :param all_case: :param reportName: :return:\"\"\"\n now = time.strftime('%Y_%m_%d_%H_%S')\n report_path = os.path.join(cur_path, reportName)\n if not os.path.exists(report_path):\n os.mkdir(report_path)\n report_abspath = os.path.join(report_path, now + '17网校直播Android测试报告.html')\n with open(report_abspath, 'wb') as fp:\n runner = HTMLTestRunner.HTMLTestRunner(stream=fp, title='17网校直播Android测试报告', description='用例执行情况')\n runner.run(allcase)\n", "source": "the_stack_v2_python_sparse", "source_path": "liveTest/main.py", "source_repo": "Lee-hsien-sen/liveRoomAndroidUiTest", "split": "test", "star_events_count": 2} {"blob_id": "9e4c9d3343448eacac22a93c90341d1b194cd362", "bodies": ["self.min = np.array([np.nan])\nself.value = np.nan\nself.domain = np.array([[0.0, 1.0]])\nself.n = 1\nself.smooth = True\nself.info = [True, False, False]\nself.latex_name = 'Forrester Function'\nself.latex_type = 'Many Local Minima'\nself.latex_cost = '\\\\[ f(x) = (6x - 2)^2 \\\\sin(12x - 4) \\\\]'\nself.latex_desc = 'This is a simple one-dimensional test function. '", "c = np.zeros(x.shape[1:])\nc = np.sin(12 * x[0] - 4) * (6 * x[0] - 2) ** 2\nreturn c"], "bodies_text": "<|body_start_0|>\n self.min = np.array([np.nan])\n self.value = np.nan\n self.domain = np.array([[0.0, 1.0]])\n self.n = 1\n self.smooth = True\n self.info = [True, False, False]\n self.latex_name = 'Forrester Function'\n self.latex_type = 'Many Local Minima'\n self.latex_cost = '\\\\[ f(x) = (6x - 2)^2 \\\\sin(12x - 4) \\\\]'\n self.latex_desc = 'This is a simple one-dimensional test function. '\n<|end_body_0|>\n\n<|body_start_1|>\n c = np.zeros(x.shape[1:])\n c = np.sin(12 * x[0] - 4) * (6 * x[0] - 2) ** 2\n return c\n<|end_body_1|>\n", "class_docstring": "Forrester Function.", "class_name": "Forrester", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Forrester:\n \"\"\"Forrester Function.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def cost(self, x):\n \"\"\"Cost function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.min = np.array([np.nan])\n self.value = np.nan\n self.domain = np.array([[0.0, 1.0]])\n self.n = 1\n self.smooth = True\n self.info = [True, False, False]\n self.latex_name = 'Forrester Function'\n self.latex_type = 'Many Local Minima'\n self.latex_cost = '\\\\[ f(x) = (6x - 2)^2 \\\\sin(12x - 4) \\\\]'\n self.latex_desc = 'This is a simple one-dimensional test function. '\n<|end_body_0|>\n\n<|body_start_1|>\n c = np.zeros(x.shape[1:])\n c = np.sin(12 * x[0] - 4) * (6 * x[0] - 2) ** 2\n return c\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000161", "length_bytes": 903, "license_type": "no_license", "methods": [{"docstring": "Constructor.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Cost function.", "name": "cost", "signature": "def cost(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012097", "prompt": "Implement the Python class `Forrester` described below.\n\nClass description:\nForrester Function.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor.\n- def cost(self, x): Cost function.", "prompted_full_text": "Implement the Python class `Forrester` described below.\n\nClass description:\nForrester Function.\n\nMethod signatures and docstrings:\n- def __init__(self): Constructor.\n- def cost(self, x): Cost function.\n\n<|skeleton|>\nclass Forrester:\n \"\"\"Forrester Function.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def cost(self, x):\n \"\"\"Cost function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.min = np.array([np.nan])\n self.value = np.nan\n self.domain = np.array([[0.0, 1.0]])\n self.n = 1\n self.smooth = True\n self.info = [True, False, False]\n self.latex_name = 'Forrester Function'\n self.latex_type = 'Many Local Minima'\n self.latex_cost = '\\\\[ f(x) = (6x - 2)^2 \\\\sin(12x - 4) \\\\]'\n self.latex_desc = 'This is a simple one-dimensional test function. '\n<|end_body_0|>\n\n<|body_start_1|>\n c = np.zeros(x.shape[1:])\n c = np.sin(12 * x[0] - 4) * (6 * x[0] - 2) ** 2\n return c\n<|end_body_1|>\n", "revision_id": "f2a74df3ab01ac35ea8d80569da909ffa1e86af3", "skeleton": "<|skeleton|>\nclass Forrester:\n \"\"\"Forrester Function.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n <|body_0|>\n\n def cost(self, x):\n \"\"\"Cost function.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Forrester:\n \"\"\"Forrester Function.\"\"\"\n\n def __init__(self):\n \"\"\"Constructor.\"\"\"\n self.min = np.array([np.nan])\n self.value = np.nan\n self.domain = np.array([[0.0, 1.0]])\n self.n = 1\n self.smooth = True\n self.info = [True, False, False]\n self.latex_name = 'Forrester Function'\n self.latex_type = 'Many Local Minima'\n self.latex_cost = '\\\\[ f(x) = (6x - 2)^2 \\\\sin(12x - 4) \\\\]'\n self.latex_desc = 'This is a simple one-dimensional test function. '\n\n def cost(self, x):\n \"\"\"Cost function.\"\"\"\n c = np.zeros(x.shape[1:])\n c = np.sin(12 * x[0] - 4) * (6 * x[0] - 2) ** 2\n return c\n", "source": "the_stack_v2_python_sparse", "source_path": "ctf/functions1d/forrester.py", "source_repo": "cntaylor/ctf", "split": "test", "star_events_count": 1} {"blob_id": "057807310c3dcf027a2c3f41e04b0ad1a290aae2", "bodies": ["Document.__init__(self)\nsession.info('Exporting to NMF XML version 1.0')\nwith session._objslock:\n self.scenarioPlan = ScenarioPlan(self, session)\n if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:\n deployment = CoreDeploymentWriter(self, self.scenarioPlan, session)\n deployment.add_deployment()\n self.scenarioPlan.setAttribute('deployed', 'true')", "self.scenarioPlan.coreSession.info('saving session XML file %s' % filename)\nf = open(filename, 'w')\nDocument.writexml(self, writer=f, indent='', addindent=' ', newl='\\n', encoding='UTF-8')\nf.close()\nif self.scenarioPlan.coreSession.user is not None:\n uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid\n gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid\n os.chown(filename, uid, gid)"], "bodies_text": "<|body_start_0|>\n Document.__init__(self)\n session.info('Exporting to NMF XML version 1.0')\n with session._objslock:\n self.scenarioPlan = ScenarioPlan(self, session)\n if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:\n deployment = CoreDeploymentWriter(self, self.scenarioPlan, session)\n deployment.add_deployment()\n self.scenarioPlan.setAttribute('deployed', 'true')\n<|end_body_0|>\n\n<|body_start_1|>\n self.scenarioPlan.coreSession.info('saving session XML file %s' % filename)\n f = open(filename, 'w')\n Document.writexml(self, writer=f, indent='', addindent=' ', newl='\\n', encoding='UTF-8')\n f.close()\n if self.scenarioPlan.coreSession.user is not None:\n uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid\n gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid\n os.chown(filename, uid, gid)\n<|end_body_1|>\n", "class_docstring": "Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.", "class_name": "CoreDocumentWriter1", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CoreDocumentWriter1:\n \"\"\"Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.\"\"\"\n\n def __init__(self, session):\n \"\"\"Create an empty Scenario XML Document, then populate it with objects from the given session.\"\"\"\n <|body_0|>\n\n def writexml(self, filename):\n \"\"\"Commit to file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Document.__init__(self)\n session.info('Exporting to NMF XML version 1.0')\n with session._objslock:\n self.scenarioPlan = ScenarioPlan(self, session)\n if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:\n deployment = CoreDeploymentWriter(self, self.scenarioPlan, session)\n deployment.add_deployment()\n self.scenarioPlan.setAttribute('deployed', 'true')\n<|end_body_0|>\n\n<|body_start_1|>\n self.scenarioPlan.coreSession.info('saving session XML file %s' % filename)\n f = open(filename, 'w')\n Document.writexml(self, writer=f, indent='', addindent=' ', newl='\\n', encoding='UTF-8')\n f.close()\n if self.scenarioPlan.coreSession.user is not None:\n uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid\n gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid\n os.chown(filename, uid, gid)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000162", "length_bytes": 37675, "license_type": "permissive", "methods": [{"docstring": "Create an empty Scenario XML Document, then populate it with objects from the given session.", "name": "__init__", "signature": "def __init__(self, session)"}, {"docstring": "Commit to file", "name": "writexml", "signature": "def writexml(self, filename)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006658", "prompt": "Implement the Python class `CoreDocumentWriter1` described below.\n\nClass description:\nUtility class for writing a CoreSession to XML in the NMF scenPlan schema. The init method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.\n\nMethod signatures and docstrings:\n- def __init__(self, session): Create an empty Scenario XML Document, then populate it with objects from the given session.\n- def writexml(self, filename): Commit to file", "prompted_full_text": "Implement the Python class `CoreDocumentWriter1` described below.\n\nClass description:\nUtility class for writing a CoreSession to XML in the NMF scenPlan schema. The init method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.\n\nMethod signatures and docstrings:\n- def __init__(self, session): Create an empty Scenario XML Document, then populate it with objects from the given session.\n- def writexml(self, filename): Commit to file\n\n<|skeleton|>\nclass CoreDocumentWriter1:\n \"\"\"Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.\"\"\"\n\n def __init__(self, session):\n \"\"\"Create an empty Scenario XML Document, then populate it with objects from the given session.\"\"\"\n <|body_0|>\n\n def writexml(self, filename):\n \"\"\"Commit to file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Document.__init__(self)\n session.info('Exporting to NMF XML version 1.0')\n with session._objslock:\n self.scenarioPlan = ScenarioPlan(self, session)\n if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:\n deployment = CoreDeploymentWriter(self, self.scenarioPlan, session)\n deployment.add_deployment()\n self.scenarioPlan.setAttribute('deployed', 'true')\n<|end_body_0|>\n\n<|body_start_1|>\n self.scenarioPlan.coreSession.info('saving session XML file %s' % filename)\n f = open(filename, 'w')\n Document.writexml(self, writer=f, indent='', addindent=' ', newl='\\n', encoding='UTF-8')\n f.close()\n if self.scenarioPlan.coreSession.user is not None:\n uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid\n gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid\n os.chown(filename, uid, gid)\n<|end_body_1|>\n", "revision_id": "9c246b0ae0e9182dcf61acc4faee41841d5cbd51", "skeleton": "<|skeleton|>\nclass CoreDocumentWriter1:\n \"\"\"Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.\"\"\"\n\n def __init__(self, session):\n \"\"\"Create an empty Scenario XML Document, then populate it with objects from the given session.\"\"\"\n <|body_0|>\n\n def writexml(self, filename):\n \"\"\"Commit to file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CoreDocumentWriter1:\n \"\"\"Utility class for writing a CoreSession to XML in the NMF scenPlan schema. The init method builds an xml.dom.minidom.Document, and the writexml() method saves the XML file.\"\"\"\n\n def __init__(self, session):\n \"\"\"Create an empty Scenario XML Document, then populate it with objects from the given session.\"\"\"\n Document.__init__(self)\n session.info('Exporting to NMF XML version 1.0')\n with session._objslock:\n self.scenarioPlan = ScenarioPlan(self, session)\n if session.getstate() == coreapi.CORE_EVENT_RUNTIME_STATE:\n deployment = CoreDeploymentWriter(self, self.scenarioPlan, session)\n deployment.add_deployment()\n self.scenarioPlan.setAttribute('deployed', 'true')\n\n def writexml(self, filename):\n \"\"\"Commit to file\"\"\"\n self.scenarioPlan.coreSession.info('saving session XML file %s' % filename)\n f = open(filename, 'w')\n Document.writexml(self, writer=f, indent='', addindent=' ', newl='\\n', encoding='UTF-8')\n f.close()\n if self.scenarioPlan.coreSession.user is not None:\n uid = pwd.getpwnam(self.scenarioPlan.coreSession.user).pw_uid\n gid = os.stat(self.scenarioPlan.coreSession.sessiondir).st_gid\n os.chown(filename, uid, gid)\n", "source": "the_stack_v2_python_sparse", "source_path": "coreemu-read-only/daemon/core/misc/xmlwriter1.py", "source_repo": "ermin-sakic/common-open-research-emulator-CORE", "split": "test", "star_events_count": 3} {"blob_id": "f44d23c87bc5611f0be42da658e931a2219e01b3", "bodies": ["self.socket = _ConfigSocket()\nself.logging = _ConfigLogging()\nself._loading_config(config_path, config_name)", "if isinstance(config, str) and config.find('$env.') != -1:\n env_name = config.replace('$env.', '')\n return os.getenv(env_name)\nreturn config", "if len(config_path) and os.path.exists(config_path):\n abs_path = config_path\nelse:\n abs_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\ncf_path = '%s/%s' % (abs_path, config_name)\nif not os.path.exists(cf_path):\n print('ERR: config file `%s` not exists' % cf_path)\n exit(1)\nfs = open(cf_path, encoding='UTF-8')\nconfigs = yaml.load(fs, Loader=yaml.FullLoader)\nsocket = configs.get('socket', {})\nsocket_file = self._get_env_config(socket.get('file'))\nif socket_file:\n self.socket.file = socket_file\nlogger = configs.get('logging', {})\nlogger_level = self._get_env_config(logger.get('level'))\nif logger_level:\n self.logging.level = logger_level"], "bodies_text": "<|body_start_0|>\n self.socket = _ConfigSocket()\n self.logging = _ConfigLogging()\n self._loading_config(config_path, config_name)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(config, str) and config.find('$env.') != -1:\n env_name = config.replace('$env.', '')\n return os.getenv(env_name)\n return config\n<|end_body_1|>\n\n<|body_start_2|>\n if len(config_path) and os.path.exists(config_path):\n abs_path = config_path\n else:\n abs_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n cf_path = '%s/%s' % (abs_path, config_name)\n if not os.path.exists(cf_path):\n print('ERR: config file `%s` not exists' % cf_path)\n exit(1)\n fs = open(cf_path, encoding='UTF-8')\n configs = yaml.load(fs, Loader=yaml.FullLoader)\n socket = configs.get('socket', {})\n socket_file = self._get_env_config(socket.get('file'))\n if socket_file:\n self.socket.file = socket_file\n logger = configs.get('logging', {})\n logger_level = self._get_env_config(logger.get('level'))\n if logger_level:\n self.logging.level = logger_level\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Config", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Config:\n\n def __init__(self, config_path: str='', config_name: str='config.yaml'):\n \"\"\"init config :param config_path: local config file path :param config_name: local config file name\"\"\"\n <|body_0|>\n\n def _get_env_config(config: str):\n \"\"\"get the configuration in the local environment variable :param config: :return:\"\"\"\n <|body_1|>\n\n def _loading_config(self, config_path: str, config_name: str):\n \"\"\"load local configuration file :param config_path: :param config_name: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.socket = _ConfigSocket()\n self.logging = _ConfigLogging()\n self._loading_config(config_path, config_name)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(config, str) and config.find('$env.') != -1:\n env_name = config.replace('$env.', '')\n return os.getenv(env_name)\n return config\n<|end_body_1|>\n\n<|body_start_2|>\n if len(config_path) and os.path.exists(config_path):\n abs_path = config_path\n else:\n abs_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n cf_path = '%s/%s' % (abs_path, config_name)\n if not os.path.exists(cf_path):\n print('ERR: config file `%s` not exists' % cf_path)\n exit(1)\n fs = open(cf_path, encoding='UTF-8')\n configs = yaml.load(fs, Loader=yaml.FullLoader)\n socket = configs.get('socket', {})\n socket_file = self._get_env_config(socket.get('file'))\n if socket_file:\n self.socket.file = socket_file\n logger = configs.get('logging', {})\n logger_level = self._get_env_config(logger.get('level'))\n if logger_level:\n self.logging.level = logger_level\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000163", "length_bytes": 3877, "license_type": "permissive", "methods": [{"docstring": "init config :param config_path: local config file path :param config_name: local config file name", "name": "__init__", "signature": "def __init__(self, config_path: str='', config_name: str='config.yaml')"}, {"docstring": "get the configuration in the local environment variable :param config: :return:", "name": "_get_env_config", "signature": "def _get_env_config(config: str)"}, {"docstring": "load local configuration file :param config_path: :param config_name: :return:", "name": "_loading_config", "signature": "def _loading_config(self, config_path: str, config_name: str)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_032914", "prompt": "Implement the Python class `Config` described below.\n\nClass description:\nImplement the Config class.\n\nMethod signatures and docstrings:\n- def __init__(self, config_path: str='', config_name: str='config.yaml'): init config :param config_path: local config file path :param config_name: local config file name\n- def _get_env_config(config: str): get the configuration in the local environment variable :param config: :return:\n- def _loading_config(self, config_path: str, config_name: str): load local configuration file :param config_path: :param config_name: :return:", "prompted_full_text": "Implement the Python class `Config` described below.\n\nClass description:\nImplement the Config class.\n\nMethod signatures and docstrings:\n- def __init__(self, config_path: str='', config_name: str='config.yaml'): init config :param config_path: local config file path :param config_name: local config file name\n- def _get_env_config(config: str): get the configuration in the local environment variable :param config: :return:\n- def _loading_config(self, config_path: str, config_name: str): load local configuration file :param config_path: :param config_name: :return:\n\n<|skeleton|>\nclass Config:\n\n def __init__(self, config_path: str='', config_name: str='config.yaml'):\n \"\"\"init config :param config_path: local config file path :param config_name: local config file name\"\"\"\n <|body_0|>\n\n def _get_env_config(config: str):\n \"\"\"get the configuration in the local environment variable :param config: :return:\"\"\"\n <|body_1|>\n\n def _loading_config(self, config_path: str, config_name: str):\n \"\"\"load local configuration file :param config_path: :param config_name: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.socket = _ConfigSocket()\n self.logging = _ConfigLogging()\n self._loading_config(config_path, config_name)\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(config, str) and config.find('$env.') != -1:\n env_name = config.replace('$env.', '')\n return os.getenv(env_name)\n return config\n<|end_body_1|>\n\n<|body_start_2|>\n if len(config_path) and os.path.exists(config_path):\n abs_path = config_path\n else:\n abs_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n cf_path = '%s/%s' % (abs_path, config_name)\n if not os.path.exists(cf_path):\n print('ERR: config file `%s` not exists' % cf_path)\n exit(1)\n fs = open(cf_path, encoding='UTF-8')\n configs = yaml.load(fs, Loader=yaml.FullLoader)\n socket = configs.get('socket', {})\n socket_file = self._get_env_config(socket.get('file'))\n if socket_file:\n self.socket.file = socket_file\n logger = configs.get('logging', {})\n logger_level = self._get_env_config(logger.get('level'))\n if logger_level:\n self.logging.level = logger_level\n<|end_body_2|>\n", "revision_id": "fe6f842ab4977820ff812e2f64f9cd2fbb00a62c", "skeleton": "<|skeleton|>\nclass Config:\n\n def __init__(self, config_path: str='', config_name: str='config.yaml'):\n \"\"\"init config :param config_path: local config file path :param config_name: local config file name\"\"\"\n <|body_0|>\n\n def _get_env_config(config: str):\n \"\"\"get the configuration in the local environment variable :param config: :return:\"\"\"\n <|body_1|>\n\n def _loading_config(self, config_path: str, config_name: str):\n \"\"\"load local configuration file :param config_path: :param config_name: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Config:\n def __init__(self, config_path: str='', config_name: str='config.yaml'):\n \"\"\"init config :param config_path: local config file path :param config_name: local config file name\"\"\"\n self.socket = _ConfigSocket()\n self.logging = _ConfigLogging()\n self._loading_config(config_path, config_name)\n\n def _get_env_config(config: str):\n \"\"\"get the configuration in the local environment variable :param config: :return:\"\"\"\n if isinstance(config, str) and config.find('$env.') != -1:\n env_name = config.replace('$env.', '')\n return os.getenv(env_name)\n return config\n\n def _loading_config(self, config_path: str, config_name: str):\n \"\"\"load local configuration file :param config_path: :param config_name: :return:\"\"\"\n if len(config_path) and os.path.exists(config_path):\n abs_path = config_path\n else:\n abs_path = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))\n cf_path = '%s/%s' % (abs_path, config_name)\n if not os.path.exists(cf_path):\n print('ERR: config file `%s` not exists' % cf_path)\n exit(1)\n fs = open(cf_path, encoding='UTF-8')\n configs = yaml.load(fs, Loader=yaml.FullLoader)\n socket = configs.get('socket', {})\n socket_file = self._get_env_config(socket.get('file'))\n if socket_file:\n self.socket.file = socket_file\n logger = configs.get('logging', {})\n logger_level = self._get_env_config(logger.get('level'))\n if logger_level:\n self.logging.level = logger_level\n", "source": "the_stack_v2_python_sparse", "source_path": "apisix/runner/server/config.py", "source_repo": "gdzy1987/apisix-python-plugin-runner", "split": "test", "star_events_count": 0} {"blob_id": "d809d194b4c92555607d0470235af280a3599c9b", "bodies": ["startTime = datetime.datetime.now()\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\nprop_Val = get_Col('propVal', repo)\nfoodPan = get_Col('foodPan', repo)\nhospitals = get_Col('Hospitals', repo)\nmaster = create_final(hospitals, 'zip_code', 'location_zip', 'hospitals')\nmaster_hosp = master[0]\nmaster_zips = master[1]\nmaster = create_food(foodPan, 'zip_code', 'food_Pantries', master_zips, master_hosp)\nmaster = create_prop(prop_Val, 'zipcode', 'zip_code', 'propery_value_average', master_zips, master)\nrepo.dropPermanent('Food_Prop_Hosp_ZipCodes')\nrepo.createPermanent('Food_Prop_Hosp_ZipCodes')\nrepo['ckarjadi_johnnyg7.Food_Prop_Hosp_ZipCodes'].insert_many(master)\nrepo.logout()\nendTime = datetime.datetime.now()\nreturn {'start': startTime, 'end': endTime}", "client = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\ndoc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\ndoc.add_namespace('dat', 'http://datamechanics.io/data/')\ndoc.add_namespace('ont', 'http://datamechanics.io/ontology#')\ndoc.add_namespace('log', 'http://datamechanics.io/log/')\ndoc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\nthis_script = doc.agent('alg:same_zip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\nresource1 = doc.entity('dat:hospital_reduction', {'prov:label': 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataResource', prov.model.PROV_TYPE: 'ont:Computation'})\nget_FPH = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\ndoc.wasAssociatedWith(get_FPH, this_script)\ndoc.usage(get_FPH, resource1, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '???'})\nFPH = doc.entity('dat:ckarjadi_johnnyg7#FPH', {prov.model.PROV_LABEL: 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAttributedTo(FPH, this_script)\ndoc.wasGeneratedBy(FPH, get_FPH, endTime)\ndoc.wasDerivedFrom(FPH, resource1, get_FPH, get_FPH, get_FPH)\nrepo.record(doc.serialize())\nrepo.logout()\nreturn doc"], "bodies_text": "<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n prop_Val = get_Col('propVal', repo)\n foodPan = get_Col('foodPan', repo)\n hospitals = get_Col('Hospitals', repo)\n master = create_final(hospitals, 'zip_code', 'location_zip', 'hospitals')\n master_hosp = master[0]\n master_zips = master[1]\n master = create_food(foodPan, 'zip_code', 'food_Pantries', master_zips, master_hosp)\n master = create_prop(prop_Val, 'zipcode', 'zip_code', 'propery_value_average', master_zips, master)\n repo.dropPermanent('Food_Prop_Hosp_ZipCodes')\n repo.createPermanent('Food_Prop_Hosp_ZipCodes')\n repo['ckarjadi_johnnyg7.Food_Prop_Hosp_ZipCodes'].insert_many(master)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:same_zip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource1 = doc.entity('dat:hospital_reduction', {'prov:label': 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataResource', prov.model.PROV_TYPE: 'ont:Computation'})\n get_FPH = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_FPH, this_script)\n doc.usage(get_FPH, resource1, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '???'})\n FPH = doc.entity('dat:ckarjadi_johnnyg7#FPH', {prov.model.PROV_LABEL: 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(FPH, this_script)\n doc.wasGeneratedBy(FPH, get_FPH, endTime)\n doc.wasDerivedFrom(FPH, resource1, get_FPH, get_FPH, get_FPH)\n repo.record(doc.serialize())\n repo.logout()\n return doc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "same_zip", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass same_zip:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n prop_Val = get_Col('propVal', repo)\n foodPan = get_Col('foodPan', repo)\n hospitals = get_Col('Hospitals', repo)\n master = create_final(hospitals, 'zip_code', 'location_zip', 'hospitals')\n master_hosp = master[0]\n master_zips = master[1]\n master = create_food(foodPan, 'zip_code', 'food_Pantries', master_zips, master_hosp)\n master = create_prop(prop_Val, 'zipcode', 'zip_code', 'propery_value_average', master_zips, master)\n repo.dropPermanent('Food_Prop_Hosp_ZipCodes')\n repo.createPermanent('Food_Prop_Hosp_ZipCodes')\n repo['ckarjadi_johnnyg7.Food_Prop_Hosp_ZipCodes'].insert_many(master)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:same_zip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource1 = doc.entity('dat:hospital_reduction', {'prov:label': 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataResource', prov.model.PROV_TYPE: 'ont:Computation'})\n get_FPH = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_FPH, this_script)\n doc.usage(get_FPH, resource1, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '???'})\n FPH = doc.entity('dat:ckarjadi_johnnyg7#FPH', {prov.model.PROV_LABEL: 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(FPH, this_script)\n doc.wasGeneratedBy(FPH, get_FPH, endTime)\n doc.wasDerivedFrom(FPH, resource1, get_FPH, get_FPH, get_FPH)\n repo.record(doc.serialize())\n repo.logout()\n return doc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000164", "length_bytes": 10648, "license_type": "no_license", "methods": [{"docstring": "Retrieve some data sets (not using the API here for the sake of simplicity).", "name": "execute", "signature": "def execute(trial=False)"}, {"docstring": "Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "name": "provenance", "signature": "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None)"}], "n_methods": 2, "prompt": "Implement the Python class `same_zip` described below.\n\nClass description:\nImplement the same_zip class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "prompted_full_text": "Implement the Python class `same_zip` described below.\n\nClass description:\nImplement the same_zip class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\n\n<|skeleton|>\nclass same_zip:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n prop_Val = get_Col('propVal', repo)\n foodPan = get_Col('foodPan', repo)\n hospitals = get_Col('Hospitals', repo)\n master = create_final(hospitals, 'zip_code', 'location_zip', 'hospitals')\n master_hosp = master[0]\n master_zips = master[1]\n master = create_food(foodPan, 'zip_code', 'food_Pantries', master_zips, master_hosp)\n master = create_prop(prop_Val, 'zipcode', 'zip_code', 'propery_value_average', master_zips, master)\n repo.dropPermanent('Food_Prop_Hosp_ZipCodes')\n repo.createPermanent('Food_Prop_Hosp_ZipCodes')\n repo['ckarjadi_johnnyg7.Food_Prop_Hosp_ZipCodes'].insert_many(master)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:same_zip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource1 = doc.entity('dat:hospital_reduction', {'prov:label': 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataResource', prov.model.PROV_TYPE: 'ont:Computation'})\n get_FPH = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_FPH, this_script)\n doc.usage(get_FPH, resource1, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '???'})\n FPH = doc.entity('dat:ckarjadi_johnnyg7#FPH', {prov.model.PROV_LABEL: 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(FPH, this_script)\n doc.wasGeneratedBy(FPH, get_FPH, endTime)\n doc.wasDerivedFrom(FPH, resource1, get_FPH, get_FPH, get_FPH)\n repo.record(doc.serialize())\n repo.logout()\n return doc\n<|end_body_1|>\n", "revision_id": "9cb0ad789b6ff265222cbd3ea3561ff553b4cdff", "skeleton": "<|skeleton|>\nclass same_zip:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class same_zip:\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n prop_Val = get_Col('propVal', repo)\n foodPan = get_Col('foodPan', repo)\n hospitals = get_Col('Hospitals', repo)\n master = create_final(hospitals, 'zip_code', 'location_zip', 'hospitals')\n master_hosp = master[0]\n master_zips = master[1]\n master = create_food(foodPan, 'zip_code', 'food_Pantries', master_zips, master_hosp)\n master = create_prop(prop_Val, 'zipcode', 'zip_code', 'propery_value_average', master_zips, master)\n repo.dropPermanent('Food_Prop_Hosp_ZipCodes')\n repo.createPermanent('Food_Prop_Hosp_ZipCodes')\n repo['ckarjadi_johnnyg7.Food_Prop_Hosp_ZipCodes'].insert_many(master)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ckarjadi_johnnyg7', 'ckarjadi_johnnyg7')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('bdp', 'https://data.cityofboston.gov/resource/')\n this_script = doc.agent('alg:same_zip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource1 = doc.entity('dat:hospital_reduction', {'prov:label': 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataResource', prov.model.PROV_TYPE: 'ont:Computation'})\n get_FPH = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_FPH, this_script)\n doc.usage(get_FPH, resource1, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '???'})\n FPH = doc.entity('dat:ckarjadi_johnnyg7#FPH', {prov.model.PROV_LABEL: 'Food Pantries, Property Values under average, Hospitals reduced by Zip Code', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(FPH, this_script)\n doc.wasGeneratedBy(FPH, get_FPH, endTime)\n doc.wasDerivedFrom(FPH, resource1, get_FPH, get_FPH, get_FPH)\n repo.record(doc.serialize())\n repo.logout()\n return doc\n", "source": "the_stack_v2_python_sparse", "source_path": "ckarjadi_johnnyg7/same_zip.py", "source_repo": "yinghang/course-2016-fal-proj", "split": "test", "star_events_count": 1} {"blob_id": "8b909430ca04435e7f86cfed4facf78938980a30", "bodies": ["for i in range(10000):\n if 3 ** i == n:\n return True\n elif 3 ** i > n:\n return False", "if n < 3:\n if n == 1:\n return True\n else:\n return False\nwhile n > 1:\n n_ = int(n / 3)\n if n_ * 3 == n:\n n = n_\n else:\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n for i in range(10000):\n if 3 ** i == n:\n return True\n elif 3 ** i > n:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 3:\n if n == 1:\n return True\n else:\n return False\n while n > 1:\n n_ = int(n / 3)\n if n_ * 3 == n:\n n = n_\n else:\n return False\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isPowerOfThree(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for i in range(10000):\n if 3 ** i == n:\n return True\n elif 3 ** i > n:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 3:\n if n == 1:\n return True\n else:\n return False\n while n > 1:\n n_ = int(n / 3)\n if n_ * 3 == n:\n n = n_\n else:\n return False\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000165", "length_bytes": 890, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: bool", "name": "isPowerOfThree", "signature": "def isPowerOfThree(self, n)"}, {"docstring": ":type n: int :rtype: bool", "name": "isPowerOfThree2", "signature": "def isPowerOfThree2(self, n)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPowerOfThree(self, n): :type n: int :rtype: bool\n- def isPowerOfThree2(self, n): :type n: int :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isPowerOfThree(self, n): :type n: int :rtype: bool\n- def isPowerOfThree2(self, n): :type n: int :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def isPowerOfThree(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for i in range(10000):\n if 3 ** i == n:\n return True\n elif 3 ** i > n:\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 3:\n if n == 1:\n return True\n else:\n return False\n while n > 1:\n n_ = int(n / 3)\n if n_ * 3 == n:\n n = n_\n else:\n return False\n return True\n<|end_body_1|>\n", "revision_id": "4c395a31e627b6096a601b9e30f82073ddc9b02b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isPowerOfThree(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_0|>\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isPowerOfThree(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n for i in range(10000):\n if 3 ** i == n:\n return True\n elif 3 ** i > n:\n return False\n\n def isPowerOfThree2(self, n):\n \"\"\":type n: int :rtype: bool\"\"\"\n if n < 3:\n if n == 1:\n return True\n else:\n return False\n while n > 1:\n n_ = int(n / 3)\n if n_ * 3 == n:\n n = n_\n else:\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/easy/Power_of_Three.py", "source_repo": "xiangzz159/Python-Study", "split": "test", "star_events_count": 0} {"blob_id": "a012293ade129d98d8c85dc89b94c564b4280007", "bodies": ["try:\n app_id_list = get_cc_app_id_by_user()\n data_result = machine_statistics(table_set=KafkaBroker, field='ip', app_id_list=app_id_list)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\nexcept Exception as err:\n logger.error(f'kafka机器查询汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': '0', 'message': f'{err}'})", "try:\n app_dict = get_cc_app_id_by_user(res='print_name')\n data_result = machine_statistics_group_app_id_top_five(table_set=KafkaBroker, field='ip', app_dict=app_dict)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\nexcept Exception as err:\n logger.error(f'查询前5位业务的kafka机器使用数量汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'{err}'})", "try:\n post_data = request.data\n bk_username = request.user.username\n get_type = post_data.get('get_type')\n if get_type == 'cluster':\n data_result = get_data_in_cluster_monitor(post_data, bk_username)\n elif get_type == 'topic':\n data_result = get_data_in_topic_monitor(post_data, bk_username)\n elif get_type == 'consumer_group':\n data_result = get_data_in_consumer_group_monitor(post_data, bk_username)\n else:\n logger.warning(f'匹配不到传入的维度,请检测: get_type:{get_type}')\n data_result = []\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\nexcept Exception as err:\n logger.error(f'查询并汇总监控数据失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'查询并汇总监控数据失败:{err}'})", "try:\n post_data = request.data\n bk_username = request.user.username\n info = retrieval_kafka_install_monitor_param(post_data=post_data, bk_username=bk_username)\n if info['code'] == 0:\n return JsonResponse(info['data'])\n install_parameter = info['data']\n if install_kafka_cluster_monitor_flow(install_parameter):\n return JsonResponse({'result': True, 'data': [], 'code': 0, 'message': '监控部署任务已启动'})\n return JsonResponse({'result': False, 'data': [], 'code': 1, 'message': '监控部署任务失败'})\nexcept Exception as err:\n logger.error(f'创建集群监控失败:{err}')\n return JsonResponse({'result': False, 'data': [], 'message': f'{err}', 'code': 1})"], "bodies_text": "<|body_start_0|>\n try:\n app_id_list = get_cc_app_id_by_user()\n data_result = machine_statistics(table_set=KafkaBroker, field='ip', app_id_list=app_id_list)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'kafka机器查询汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': '0', 'message': f'{err}'})\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n app_dict = get_cc_app_id_by_user(res='print_name')\n data_result = machine_statistics_group_app_id_top_five(table_set=KafkaBroker, field='ip', app_dict=app_dict)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询前5位业务的kafka机器使用数量汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'{err}'})\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n post_data = request.data\n bk_username = request.user.username\n get_type = post_data.get('get_type')\n if get_type == 'cluster':\n data_result = get_data_in_cluster_monitor(post_data, bk_username)\n elif get_type == 'topic':\n data_result = get_data_in_topic_monitor(post_data, bk_username)\n elif get_type == 'consumer_group':\n data_result = get_data_in_consumer_group_monitor(post_data, bk_username)\n else:\n logger.warning(f'匹配不到传入的维度,请检测: get_type:{get_type}')\n data_result = []\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询并汇总监控数据失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'查询并汇总监控数据失败:{err}'})\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n post_data = request.data\n bk_username = request.user.username\n info = retrieval_kafka_install_monitor_param(post_data=post_data, bk_username=bk_username)\n if info['code'] == 0:\n return JsonResponse(info['data'])\n install_parameter = info['data']\n if install_kafka_cluster_monitor_flow(install_parameter):\n return JsonResponse({'result': True, 'data': [], 'code': 0, 'message': '监控部署任务已启动'})\n return JsonResponse({'result': False, 'data': [], 'code': 1, 'message': '监控部署任务失败'})\n except Exception as err:\n logger.error(f'创建集群监控失败:{err}')\n return JsonResponse({'result': False, 'data': [], 'message': f'{err}', 'code': 1})\n<|end_body_3|>\n", "class_docstring": "kafka broker信息表视图", "class_name": "KafkaBrokerViewSet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KafkaBrokerViewSet:\n \"\"\"kafka broker信息表视图\"\"\"\n\n def get_machine_statistics(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics 统计kafka投入已使用的机器数量\"\"\"\n <|body_0|>\n\n def get_machine_statistics_top_five(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics_top_five 根据用户已有业务权限,查询每个业务的机器投入数量,输出TOP5\"\"\"\n <|body_1|>\n\n def get_kafka_monitor_data(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/monitor_data 根据用户返回过滤条件,查询监控平台的监控数据并汇总到前端 返回字典格式\"\"\"\n <|body_2|>\n\n def create_monitor(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/create_monitor 部署集群监控\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n app_id_list = get_cc_app_id_by_user()\n data_result = machine_statistics(table_set=KafkaBroker, field='ip', app_id_list=app_id_list)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'kafka机器查询汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': '0', 'message': f'{err}'})\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n app_dict = get_cc_app_id_by_user(res='print_name')\n data_result = machine_statistics_group_app_id_top_five(table_set=KafkaBroker, field='ip', app_dict=app_dict)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询前5位业务的kafka机器使用数量汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'{err}'})\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n post_data = request.data\n bk_username = request.user.username\n get_type = post_data.get('get_type')\n if get_type == 'cluster':\n data_result = get_data_in_cluster_monitor(post_data, bk_username)\n elif get_type == 'topic':\n data_result = get_data_in_topic_monitor(post_data, bk_username)\n elif get_type == 'consumer_group':\n data_result = get_data_in_consumer_group_monitor(post_data, bk_username)\n else:\n logger.warning(f'匹配不到传入的维度,请检测: get_type:{get_type}')\n data_result = []\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询并汇总监控数据失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'查询并汇总监控数据失败:{err}'})\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n post_data = request.data\n bk_username = request.user.username\n info = retrieval_kafka_install_monitor_param(post_data=post_data, bk_username=bk_username)\n if info['code'] == 0:\n return JsonResponse(info['data'])\n install_parameter = info['data']\n if install_kafka_cluster_monitor_flow(install_parameter):\n return JsonResponse({'result': True, 'data': [], 'code': 0, 'message': '监控部署任务已启动'})\n return JsonResponse({'result': False, 'data': [], 'code': 1, 'message': '监控部署任务失败'})\n except Exception as err:\n logger.error(f'创建集群监控失败:{err}')\n return JsonResponse({'result': False, 'data': [], 'message': f'{err}', 'code': 1})\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000166", "length_bytes": 12453, "license_type": "no_license", "methods": [{"docstring": "POST /kafka/brokers/get_machine_statistics 统计kafka投入已使用的机器数量", "name": "get_machine_statistics", "signature": "def get_machine_statistics(self, request, *args, **kwargs)"}, {"docstring": "POST /kafka/brokers/get_machine_statistics_top_five 根据用户已有业务权限,查询每个业务的机器投入数量,输出TOP5", "name": "get_machine_statistics_top_five", "signature": "def get_machine_statistics_top_five(self, request, *args, **kwargs)"}, {"docstring": "POST /kafka/brokers/monitor_data 根据用户返回过滤条件,查询监控平台的监控数据并汇总到前端 返回字典格式", "name": "get_kafka_monitor_data", "signature": "def get_kafka_monitor_data(self, request, *args, **kwargs)"}, {"docstring": "POST /kafka/brokers/create_monitor 部署集群监控", "name": "create_monitor", "signature": "def create_monitor(self, request, *args, **kwargs)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_034600", "prompt": "Implement the Python class `KafkaBrokerViewSet` described below.\n\nClass description:\nkafka broker信息表视图\n\nMethod signatures and docstrings:\n- def get_machine_statistics(self, request, *args, **kwargs): POST /kafka/brokers/get_machine_statistics 统计kafka投入已使用的机器数量\n- def get_machine_statistics_top_five(self, request, *args, **kwargs): POST /kafka/brokers/get_machine_statistics_top_five 根据用户已有业务权限,查询每个业务的机器投入数量,输出TOP5\n- def get_kafka_monitor_data(self, request, *args, **kwargs): POST /kafka/brokers/monitor_data 根据用户返回过滤条件,查询监控平台的监控数据并汇总到前端 返回字典格式\n- def create_monitor(self, request, *args, **kwargs): POST /kafka/brokers/create_monitor 部署集群监控", "prompted_full_text": "Implement the Python class `KafkaBrokerViewSet` described below.\n\nClass description:\nkafka broker信息表视图\n\nMethod signatures and docstrings:\n- def get_machine_statistics(self, request, *args, **kwargs): POST /kafka/brokers/get_machine_statistics 统计kafka投入已使用的机器数量\n- def get_machine_statistics_top_five(self, request, *args, **kwargs): POST /kafka/brokers/get_machine_statistics_top_five 根据用户已有业务权限,查询每个业务的机器投入数量,输出TOP5\n- def get_kafka_monitor_data(self, request, *args, **kwargs): POST /kafka/brokers/monitor_data 根据用户返回过滤条件,查询监控平台的监控数据并汇总到前端 返回字典格式\n- def create_monitor(self, request, *args, **kwargs): POST /kafka/brokers/create_monitor 部署集群监控\n\n<|skeleton|>\nclass KafkaBrokerViewSet:\n \"\"\"kafka broker信息表视图\"\"\"\n\n def get_machine_statistics(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics 统计kafka投入已使用的机器数量\"\"\"\n <|body_0|>\n\n def get_machine_statistics_top_five(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics_top_five 根据用户已有业务权限,查询每个业务的机器投入数量,输出TOP5\"\"\"\n <|body_1|>\n\n def get_kafka_monitor_data(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/monitor_data 根据用户返回过滤条件,查询监控平台的监控数据并汇总到前端 返回字典格式\"\"\"\n <|body_2|>\n\n def create_monitor(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/create_monitor 部署集群监控\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n app_id_list = get_cc_app_id_by_user()\n data_result = machine_statistics(table_set=KafkaBroker, field='ip', app_id_list=app_id_list)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'kafka机器查询汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': '0', 'message': f'{err}'})\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n app_dict = get_cc_app_id_by_user(res='print_name')\n data_result = machine_statistics_group_app_id_top_five(table_set=KafkaBroker, field='ip', app_dict=app_dict)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询前5位业务的kafka机器使用数量汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'{err}'})\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n post_data = request.data\n bk_username = request.user.username\n get_type = post_data.get('get_type')\n if get_type == 'cluster':\n data_result = get_data_in_cluster_monitor(post_data, bk_username)\n elif get_type == 'topic':\n data_result = get_data_in_topic_monitor(post_data, bk_username)\n elif get_type == 'consumer_group':\n data_result = get_data_in_consumer_group_monitor(post_data, bk_username)\n else:\n logger.warning(f'匹配不到传入的维度,请检测: get_type:{get_type}')\n data_result = []\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询并汇总监控数据失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'查询并汇总监控数据失败:{err}'})\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n post_data = request.data\n bk_username = request.user.username\n info = retrieval_kafka_install_monitor_param(post_data=post_data, bk_username=bk_username)\n if info['code'] == 0:\n return JsonResponse(info['data'])\n install_parameter = info['data']\n if install_kafka_cluster_monitor_flow(install_parameter):\n return JsonResponse({'result': True, 'data': [], 'code': 0, 'message': '监控部署任务已启动'})\n return JsonResponse({'result': False, 'data': [], 'code': 1, 'message': '监控部署任务失败'})\n except Exception as err:\n logger.error(f'创建集群监控失败:{err}')\n return JsonResponse({'result': False, 'data': [], 'message': f'{err}', 'code': 1})\n<|end_body_3|>\n", "revision_id": "97cfac2ba94d67980d837f0b541caae70b68a595", "skeleton": "<|skeleton|>\nclass KafkaBrokerViewSet:\n \"\"\"kafka broker信息表视图\"\"\"\n\n def get_machine_statistics(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics 统计kafka投入已使用的机器数量\"\"\"\n <|body_0|>\n\n def get_machine_statistics_top_five(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics_top_five 根据用户已有业务权限,查询每个业务的机器投入数量,输出TOP5\"\"\"\n <|body_1|>\n\n def get_kafka_monitor_data(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/monitor_data 根据用户返回过滤条件,查询监控平台的监控数据并汇总到前端 返回字典格式\"\"\"\n <|body_2|>\n\n def create_monitor(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/create_monitor 部署集群监控\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class KafkaBrokerViewSet:\n \"\"\"kafka broker信息表视图\"\"\"\n\n def get_machine_statistics(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics 统计kafka投入已使用的机器数量\"\"\"\n try:\n app_id_list = get_cc_app_id_by_user()\n data_result = machine_statistics(table_set=KafkaBroker, field='ip', app_id_list=app_id_list)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'kafka机器查询汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': '0', 'message': f'{err}'})\n\n def get_machine_statistics_top_five(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/get_machine_statistics_top_five 根据用户已有业务权限,查询每个业务的机器投入数量,输出TOP5\"\"\"\n try:\n app_dict = get_cc_app_id_by_user(res='print_name')\n data_result = machine_statistics_group_app_id_top_five(table_set=KafkaBroker, field='ip', app_dict=app_dict)\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询前5位业务的kafka机器使用数量汇总失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'{err}'})\n\n def get_kafka_monitor_data(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/monitor_data 根据用户返回过滤条件,查询监控平台的监控数据并汇总到前端 返回字典格式\"\"\"\n try:\n post_data = request.data\n bk_username = request.user.username\n get_type = post_data.get('get_type')\n if get_type == 'cluster':\n data_result = get_data_in_cluster_monitor(post_data, bk_username)\n elif get_type == 'topic':\n data_result = get_data_in_topic_monitor(post_data, bk_username)\n elif get_type == 'consumer_group':\n data_result = get_data_in_consumer_group_monitor(post_data, bk_username)\n else:\n logger.warning(f'匹配不到传入的维度,请检测: get_type:{get_type}')\n data_result = []\n return JsonResponse({'result': True, 'code': 0, 'data': data_result, 'message': 'query success'})\n except Exception as err:\n logger.error(f'查询并汇总监控数据失败:{err}')\n return JsonResponse({'result': False, 'code': 1, 'data': [], 'message': f'查询并汇总监控数据失败:{err}'})\n\n def create_monitor(self, request, *args, **kwargs):\n \"\"\"POST /kafka/brokers/create_monitor 部署集群监控\"\"\"\n try:\n post_data = request.data\n bk_username = request.user.username\n info = retrieval_kafka_install_monitor_param(post_data=post_data, bk_username=bk_username)\n if info['code'] == 0:\n return JsonResponse(info['data'])\n install_parameter = info['data']\n if install_kafka_cluster_monitor_flow(install_parameter):\n return JsonResponse({'result': True, 'data': [], 'code': 0, 'message': '监控部署任务已启动'})\n return JsonResponse({'result': False, 'data': [], 'code': 1, 'message': '监控部署任务失败'})\n except Exception as err:\n logger.error(f'创建集群监控失败:{err}')\n return JsonResponse({'result': False, 'data': [], 'message': f'{err}', 'code': 1})\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/kafka/views.py", "source_repo": "sdgdsffdsfff/bk-dop", "split": "test", "star_events_count": 0} {"blob_id": "94f2b919dae192fb6c8b0711ac5c7ff368e40702", "bodies": ["super().__init__()\nself.num_heads = num_heads\nself.concat_heads = concat_heads\nif self.concat_heads:\n assert c_out % num_heads == 0, 'Number of output features must be a multiple of the count of heads.'\n c_out = c_out // num_heads\nif c_in != 0:\n self.node_projection = nn.Linear(c_in, c_out * num_heads)\n nn.init.xavier_uniform_(self.node_projection.weight.data, gain=1.414)\nself.edge_projection = nn.Linear(edge_dim, c_out * num_heads)\nif c_in != 0:\n self.a = nn.Parameter(torch.Tensor(num_heads, 3 * c_out))\nelse:\n self.a = nn.Parameter(torch.Tensor(num_heads, c_out))\nself.leakyrelu = nn.LeakyReLU(alpha)\nnn.init.xavier_uniform_(self.edge_projection.weight.data, gain=1.414)\nnn.init.xavier_uniform_(self.a.data, gain=1.414)", "batch_size, num_nodes = (edge_feats.size(0), adj_matrix.size(-1))\nif node_feats != None:\n node_feats = self.node_projection(node_feats)\n node_feats = node_feats.view(batch_size, num_nodes, self.num_heads, -1)\n node_feats_flat = node_feats.view(batch_size * num_nodes, self.num_heads, -1)\nedge_feats = self.edge_projection(edge_feats)\nedge_feats = edge_feats.view(batch_size, num_nodes, num_nodes, self.num_heads, -1)\nedges = adj_matrix.nonzero(as_tuple=False)\nedge_feats_flat = edge_feats.view(batch_size * num_nodes * num_nodes, self.num_heads, -1)\nedge_indices_row = edges[:, 0] * num_nodes + edges[:, 1]\nedge_indices_col = edges[:, 0] * num_nodes + edges[:, 2]\nif node_feats != None:\n a_input = torch.cat([torch.index_select(input=node_feats_flat, index=edge_indices_row, dim=0), torch.index_select(input=node_feats_flat, index=edge_indices_col, dim=0), edge_feats_flat], dim=-1)\nelse:\n a_input = edge_feats_flat\nattn_logits = torch.einsum('bhc,hc->bh', a_input, self.a)\nattn_logits = self.leakyrelu(attn_logits)\nattn_matrix = attn_logits.new_zeros(adj_matrix.shape + (self.num_heads,)).fill_(-9000000000000000.0)\nattn_matrix[adj_matrix[..., None].repeat(1, 1, 1, self.num_heads) == 1] = attn_logits.reshape(-1)\nattn_probs = F.softmax(attn_matrix, dim=2)\nif print_attn_probs:\n print('Attention probs\\n', attn_probs.permute(0, 3, 1, 2))\nvalues = node_feats + edge_feats if node_feats != None else edge_feats\nnode_feats = torch.einsum('bijh,bijhc->bihc', attn_probs, values)\nif self.concat_heads:\n node_feats = node_feats.reshape(batch_size, num_nodes, -1)\nelse:\n node_feats = node_feats.mean(dim=2)\nreturn node_feats"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.num_heads = num_heads\n self.concat_heads = concat_heads\n if self.concat_heads:\n assert c_out % num_heads == 0, 'Number of output features must be a multiple of the count of heads.'\n c_out = c_out // num_heads\n if c_in != 0:\n self.node_projection = nn.Linear(c_in, c_out * num_heads)\n nn.init.xavier_uniform_(self.node_projection.weight.data, gain=1.414)\n self.edge_projection = nn.Linear(edge_dim, c_out * num_heads)\n if c_in != 0:\n self.a = nn.Parameter(torch.Tensor(num_heads, 3 * c_out))\n else:\n self.a = nn.Parameter(torch.Tensor(num_heads, c_out))\n self.leakyrelu = nn.LeakyReLU(alpha)\n nn.init.xavier_uniform_(self.edge_projection.weight.data, gain=1.414)\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, num_nodes = (edge_feats.size(0), adj_matrix.size(-1))\n if node_feats != None:\n node_feats = self.node_projection(node_feats)\n node_feats = node_feats.view(batch_size, num_nodes, self.num_heads, -1)\n node_feats_flat = node_feats.view(batch_size * num_nodes, self.num_heads, -1)\n edge_feats = self.edge_projection(edge_feats)\n edge_feats = edge_feats.view(batch_size, num_nodes, num_nodes, self.num_heads, -1)\n edges = adj_matrix.nonzero(as_tuple=False)\n edge_feats_flat = edge_feats.view(batch_size * num_nodes * num_nodes, self.num_heads, -1)\n edge_indices_row = edges[:, 0] * num_nodes + edges[:, 1]\n edge_indices_col = edges[:, 0] * num_nodes + edges[:, 2]\n if node_feats != None:\n a_input = torch.cat([torch.index_select(input=node_feats_flat, index=edge_indices_row, dim=0), torch.index_select(input=node_feats_flat, index=edge_indices_col, dim=0), edge_feats_flat], dim=-1)\n else:\n a_input = edge_feats_flat\n attn_logits = torch.einsum('bhc,hc->bh', a_input, self.a)\n attn_logits = self.leakyrelu(attn_logits)\n attn_matrix = attn_logits.new_zeros(adj_matrix.shape + (self.num_heads,)).fill_(-9000000000000000.0)\n attn_matrix[adj_matrix[..., None].repeat(1, 1, 1, self.num_heads) == 1] = attn_logits.reshape(-1)\n attn_probs = F.softmax(attn_matrix, dim=2)\n if print_attn_probs:\n print('Attention probs\\n', attn_probs.permute(0, 3, 1, 2))\n values = node_feats + edge_feats if node_feats != None else edge_feats\n node_feats = torch.einsum('bijh,bijhc->bihc', attn_probs, values)\n if self.concat_heads:\n node_feats = node_feats.reshape(batch_size, num_nodes, -1)\n else:\n node_feats = node_feats.mean(dim=2)\n return node_feats\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GATLayer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GATLayer:\n\n def __init__(self, c_in, c_out, edge_dim, num_heads, concat_heads=True, alpha=0.2):\n \"\"\"Inputs: c_in - Dimensionality of input features c_out - Dimensionality of output features num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The output features are equally split up over the heads if concat_heads=True. concat_heads - If True, the output of the different heads is concatenated instead of averaged. alpha - Negative slope of the LeakyReLU activation.\"\"\"\n <|body_0|>\n\n def forward(self, node_feats, edge_feats, adj_matrix, print_attn_probs=False):\n \"\"\"Inputs: node_feats - Input features of the node. Shape: [batch_size, c_in] adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes] print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.num_heads = num_heads\n self.concat_heads = concat_heads\n if self.concat_heads:\n assert c_out % num_heads == 0, 'Number of output features must be a multiple of the count of heads.'\n c_out = c_out // num_heads\n if c_in != 0:\n self.node_projection = nn.Linear(c_in, c_out * num_heads)\n nn.init.xavier_uniform_(self.node_projection.weight.data, gain=1.414)\n self.edge_projection = nn.Linear(edge_dim, c_out * num_heads)\n if c_in != 0:\n self.a = nn.Parameter(torch.Tensor(num_heads, 3 * c_out))\n else:\n self.a = nn.Parameter(torch.Tensor(num_heads, c_out))\n self.leakyrelu = nn.LeakyReLU(alpha)\n nn.init.xavier_uniform_(self.edge_projection.weight.data, gain=1.414)\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, num_nodes = (edge_feats.size(0), adj_matrix.size(-1))\n if node_feats != None:\n node_feats = self.node_projection(node_feats)\n node_feats = node_feats.view(batch_size, num_nodes, self.num_heads, -1)\n node_feats_flat = node_feats.view(batch_size * num_nodes, self.num_heads, -1)\n edge_feats = self.edge_projection(edge_feats)\n edge_feats = edge_feats.view(batch_size, num_nodes, num_nodes, self.num_heads, -1)\n edges = adj_matrix.nonzero(as_tuple=False)\n edge_feats_flat = edge_feats.view(batch_size * num_nodes * num_nodes, self.num_heads, -1)\n edge_indices_row = edges[:, 0] * num_nodes + edges[:, 1]\n edge_indices_col = edges[:, 0] * num_nodes + edges[:, 2]\n if node_feats != None:\n a_input = torch.cat([torch.index_select(input=node_feats_flat, index=edge_indices_row, dim=0), torch.index_select(input=node_feats_flat, index=edge_indices_col, dim=0), edge_feats_flat], dim=-1)\n else:\n a_input = edge_feats_flat\n attn_logits = torch.einsum('bhc,hc->bh', a_input, self.a)\n attn_logits = self.leakyrelu(attn_logits)\n attn_matrix = attn_logits.new_zeros(adj_matrix.shape + (self.num_heads,)).fill_(-9000000000000000.0)\n attn_matrix[adj_matrix[..., None].repeat(1, 1, 1, self.num_heads) == 1] = attn_logits.reshape(-1)\n attn_probs = F.softmax(attn_matrix, dim=2)\n if print_attn_probs:\n print('Attention probs\\n', attn_probs.permute(0, 3, 1, 2))\n values = node_feats + edge_feats if node_feats != None else edge_feats\n node_feats = torch.einsum('bijh,bijhc->bihc', attn_probs, values)\n if self.concat_heads:\n node_feats = node_feats.reshape(batch_size, num_nodes, -1)\n else:\n node_feats = node_feats.mean(dim=2)\n return node_feats\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000167", "length_bytes": 7847, "license_type": "no_license", "methods": [{"docstring": "Inputs: c_in - Dimensionality of input features c_out - Dimensionality of output features num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The output features are equally split up over the heads if concat_heads=True. concat_heads - If True, the output of the different heads is concatenated instead of averaged. alpha - Negative slope of the LeakyReLU activation.", "name": "__init__", "signature": "def __init__(self, c_in, c_out, edge_dim, num_heads, concat_heads=True, alpha=0.2)"}, {"docstring": "Inputs: node_feats - Input features of the node. Shape: [batch_size, c_in] adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes] print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)", "name": "forward", "signature": "def forward(self, node_feats, edge_feats, adj_matrix, print_attn_probs=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_026685", "prompt": "Implement the Python class `GATLayer` described below.\n\nClass description:\nImplement the GATLayer class.\n\nMethod signatures and docstrings:\n- def __init__(self, c_in, c_out, edge_dim, num_heads, concat_heads=True, alpha=0.2): Inputs: c_in - Dimensionality of input features c_out - Dimensionality of output features num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The output features are equally split up over the heads if concat_heads=True. concat_heads - If True, the output of the different heads is concatenated instead of averaged. alpha - Negative slope of the LeakyReLU activation.\n- def forward(self, node_feats, edge_feats, adj_matrix, print_attn_probs=False): Inputs: node_feats - Input features of the node. Shape: [batch_size, c_in] adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes] print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)", "prompted_full_text": "Implement the Python class `GATLayer` described below.\n\nClass description:\nImplement the GATLayer class.\n\nMethod signatures and docstrings:\n- def __init__(self, c_in, c_out, edge_dim, num_heads, concat_heads=True, alpha=0.2): Inputs: c_in - Dimensionality of input features c_out - Dimensionality of output features num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The output features are equally split up over the heads if concat_heads=True. concat_heads - If True, the output of the different heads is concatenated instead of averaged. alpha - Negative slope of the LeakyReLU activation.\n- def forward(self, node_feats, edge_feats, adj_matrix, print_attn_probs=False): Inputs: node_feats - Input features of the node. Shape: [batch_size, c_in] adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes] print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)\n\n<|skeleton|>\nclass GATLayer:\n\n def __init__(self, c_in, c_out, edge_dim, num_heads, concat_heads=True, alpha=0.2):\n \"\"\"Inputs: c_in - Dimensionality of input features c_out - Dimensionality of output features num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The output features are equally split up over the heads if concat_heads=True. concat_heads - If True, the output of the different heads is concatenated instead of averaged. alpha - Negative slope of the LeakyReLU activation.\"\"\"\n <|body_0|>\n\n def forward(self, node_feats, edge_feats, adj_matrix, print_attn_probs=False):\n \"\"\"Inputs: node_feats - Input features of the node. Shape: [batch_size, c_in] adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes] print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.num_heads = num_heads\n self.concat_heads = concat_heads\n if self.concat_heads:\n assert c_out % num_heads == 0, 'Number of output features must be a multiple of the count of heads.'\n c_out = c_out // num_heads\n if c_in != 0:\n self.node_projection = nn.Linear(c_in, c_out * num_heads)\n nn.init.xavier_uniform_(self.node_projection.weight.data, gain=1.414)\n self.edge_projection = nn.Linear(edge_dim, c_out * num_heads)\n if c_in != 0:\n self.a = nn.Parameter(torch.Tensor(num_heads, 3 * c_out))\n else:\n self.a = nn.Parameter(torch.Tensor(num_heads, c_out))\n self.leakyrelu = nn.LeakyReLU(alpha)\n nn.init.xavier_uniform_(self.edge_projection.weight.data, gain=1.414)\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size, num_nodes = (edge_feats.size(0), adj_matrix.size(-1))\n if node_feats != None:\n node_feats = self.node_projection(node_feats)\n node_feats = node_feats.view(batch_size, num_nodes, self.num_heads, -1)\n node_feats_flat = node_feats.view(batch_size * num_nodes, self.num_heads, -1)\n edge_feats = self.edge_projection(edge_feats)\n edge_feats = edge_feats.view(batch_size, num_nodes, num_nodes, self.num_heads, -1)\n edges = adj_matrix.nonzero(as_tuple=False)\n edge_feats_flat = edge_feats.view(batch_size * num_nodes * num_nodes, self.num_heads, -1)\n edge_indices_row = edges[:, 0] * num_nodes + edges[:, 1]\n edge_indices_col = edges[:, 0] * num_nodes + edges[:, 2]\n if node_feats != None:\n a_input = torch.cat([torch.index_select(input=node_feats_flat, index=edge_indices_row, dim=0), torch.index_select(input=node_feats_flat, index=edge_indices_col, dim=0), edge_feats_flat], dim=-1)\n else:\n a_input = edge_feats_flat\n attn_logits = torch.einsum('bhc,hc->bh', a_input, self.a)\n attn_logits = self.leakyrelu(attn_logits)\n attn_matrix = attn_logits.new_zeros(adj_matrix.shape + (self.num_heads,)).fill_(-9000000000000000.0)\n attn_matrix[adj_matrix[..., None].repeat(1, 1, 1, self.num_heads) == 1] = attn_logits.reshape(-1)\n attn_probs = F.softmax(attn_matrix, dim=2)\n if print_attn_probs:\n print('Attention probs\\n', attn_probs.permute(0, 3, 1, 2))\n values = node_feats + edge_feats if node_feats != None else edge_feats\n node_feats = torch.einsum('bijh,bijhc->bihc', attn_probs, values)\n if self.concat_heads:\n node_feats = node_feats.reshape(batch_size, num_nodes, -1)\n else:\n node_feats = node_feats.mean(dim=2)\n return node_feats\n<|end_body_1|>\n", "revision_id": "793eb5d144cafa60614c4e7392657fe056ada684", "skeleton": "<|skeleton|>\nclass GATLayer:\n\n def __init__(self, c_in, c_out, edge_dim, num_heads, concat_heads=True, alpha=0.2):\n \"\"\"Inputs: c_in - Dimensionality of input features c_out - Dimensionality of output features num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The output features are equally split up over the heads if concat_heads=True. concat_heads - If True, the output of the different heads is concatenated instead of averaged. alpha - Negative slope of the LeakyReLU activation.\"\"\"\n <|body_0|>\n\n def forward(self, node_feats, edge_feats, adj_matrix, print_attn_probs=False):\n \"\"\"Inputs: node_feats - Input features of the node. Shape: [batch_size, c_in] adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes] print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GATLayer:\n def __init__(self, c_in, c_out, edge_dim, num_heads, concat_heads=True, alpha=0.2):\n \"\"\"Inputs: c_in - Dimensionality of input features c_out - Dimensionality of output features num_heads - Number of heads, i.e. attention mechanisms to apply in parallel. The output features are equally split up over the heads if concat_heads=True. concat_heads - If True, the output of the different heads is concatenated instead of averaged. alpha - Negative slope of the LeakyReLU activation.\"\"\"\n super().__init__()\n self.num_heads = num_heads\n self.concat_heads = concat_heads\n if self.concat_heads:\n assert c_out % num_heads == 0, 'Number of output features must be a multiple of the count of heads.'\n c_out = c_out // num_heads\n if c_in != 0:\n self.node_projection = nn.Linear(c_in, c_out * num_heads)\n nn.init.xavier_uniform_(self.node_projection.weight.data, gain=1.414)\n self.edge_projection = nn.Linear(edge_dim, c_out * num_heads)\n if c_in != 0:\n self.a = nn.Parameter(torch.Tensor(num_heads, 3 * c_out))\n else:\n self.a = nn.Parameter(torch.Tensor(num_heads, c_out))\n self.leakyrelu = nn.LeakyReLU(alpha)\n nn.init.xavier_uniform_(self.edge_projection.weight.data, gain=1.414)\n nn.init.xavier_uniform_(self.a.data, gain=1.414)\n\n def forward(self, node_feats, edge_feats, adj_matrix, print_attn_probs=False):\n \"\"\"Inputs: node_feats - Input features of the node. Shape: [batch_size, c_in] adj_matrix - Adjacency matrix including self-connections. Shape: [batch_size, num_nodes, num_nodes] print_attn_probs - If True, the attention weights are printed during the forward pass (for debugging purposes)\"\"\"\n batch_size, num_nodes = (edge_feats.size(0), adj_matrix.size(-1))\n if node_feats != None:\n node_feats = self.node_projection(node_feats)\n node_feats = node_feats.view(batch_size, num_nodes, self.num_heads, -1)\n node_feats_flat = node_feats.view(batch_size * num_nodes, self.num_heads, -1)\n edge_feats = self.edge_projection(edge_feats)\n edge_feats = edge_feats.view(batch_size, num_nodes, num_nodes, self.num_heads, -1)\n edges = adj_matrix.nonzero(as_tuple=False)\n edge_feats_flat = edge_feats.view(batch_size * num_nodes * num_nodes, self.num_heads, -1)\n edge_indices_row = edges[:, 0] * num_nodes + edges[:, 1]\n edge_indices_col = edges[:, 0] * num_nodes + edges[:, 2]\n if node_feats != None:\n a_input = torch.cat([torch.index_select(input=node_feats_flat, index=edge_indices_row, dim=0), torch.index_select(input=node_feats_flat, index=edge_indices_col, dim=0), edge_feats_flat], dim=-1)\n else:\n a_input = edge_feats_flat\n attn_logits = torch.einsum('bhc,hc->bh', a_input, self.a)\n attn_logits = self.leakyrelu(attn_logits)\n attn_matrix = attn_logits.new_zeros(adj_matrix.shape + (self.num_heads,)).fill_(-9000000000000000.0)\n attn_matrix[adj_matrix[..., None].repeat(1, 1, 1, self.num_heads) == 1] = attn_logits.reshape(-1)\n attn_probs = F.softmax(attn_matrix, dim=2)\n if print_attn_probs:\n print('Attention probs\\n', attn_probs.permute(0, 3, 1, 2))\n values = node_feats + edge_feats if node_feats != None else edge_feats\n node_feats = torch.einsum('bijh,bijhc->bihc', attn_probs, values)\n if self.concat_heads:\n node_feats = node_feats.reshape(batch_size, num_nodes, -1)\n else:\n node_feats = node_feats.mean(dim=2)\n return node_feats\n", "source": "the_stack_v2_python_sparse", "source_path": "policy.py", "source_repo": "nadjarutsch/InterventionDesign", "split": "test", "star_events_count": 0} {"blob_id": "1174510aa96b9c8ce9c191ebb254aa12ee7e4276", "bodies": ["self.root = path\nself.params = params\nself.runtime = runtime", "if not self.params.appinfo:\n app_yaml = os.path.join(self.root, 'app.yaml')\n if not os.path.exists(app_yaml):\n notify('Writing [app.yaml] to [%s].' % self.root)\n runtime = 'custom' if self.params.custom else self.runtime\n with open(app_yaml, 'w') as f:\n f.write(PYTHON_APP_YAML.format(runtime=runtime))\n log.warn(APP_YAML_WARNING)\n return True\nreturn False", "if self.runtime == 'python-compat':\n dockerfile_preamble = COMPAT_DOCKERFILE_PREAMBLE\nelse:\n dockerfile_preamble = PYTHON27_DOCKERFILE_PREAMBLE\nall_config_files = []\ndockerfile_name = config.DOCKERFILE\ndockerfile_components = [dockerfile_preamble, DOCKERFILE_INSTALL_APP]\nif self.runtime == 'python-compat':\n dockerfile_components.append(DOCKERFILE_INSTALL_REQUIREMENTS_TXT)\ndockerfile_contents = ''.join((c for c in dockerfile_components))\ndockerfile = ext_runtime.GeneratedFile(dockerfile_name, dockerfile_contents)\nall_config_files.append(dockerfile)\ndockerignore = ext_runtime.GeneratedFile('.dockerignore', DOCKERIGNORE)\nall_config_files.append(dockerignore)\nreturn all_config_files", "notify = log.info if self.params.deploy else log.status.Print\nself.GenerateAppYaml(notify)\ncreated = False\nif self.params.custom or self.params.deploy:\n dockerfiles = self.GenerateDockerfileData()\n for dockerfile in dockerfiles:\n if dockerfile.WriteTo(self.root, notify):\n created = True\n if not created:\n notify('All config files already exist, not generating anything.')\nreturn created", "notify = log.info if self.params.deploy else log.status.Print\nself.GenerateAppYaml(notify)\nif not (self.params.custom or self.params.deploy):\n return []\nall_config_files = self.GenerateDockerfileData()\nreturn [f for f in all_config_files if not os.path.exists(os.path.join(self.root, f.filename))]"], "bodies_text": "<|body_start_0|>\n self.root = path\n self.params = params\n self.runtime = runtime\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.params.appinfo:\n app_yaml = os.path.join(self.root, 'app.yaml')\n if not os.path.exists(app_yaml):\n notify('Writing [app.yaml] to [%s].' % self.root)\n runtime = 'custom' if self.params.custom else self.runtime\n with open(app_yaml, 'w') as f:\n f.write(PYTHON_APP_YAML.format(runtime=runtime))\n log.warn(APP_YAML_WARNING)\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if self.runtime == 'python-compat':\n dockerfile_preamble = COMPAT_DOCKERFILE_PREAMBLE\n else:\n dockerfile_preamble = PYTHON27_DOCKERFILE_PREAMBLE\n all_config_files = []\n dockerfile_name = config.DOCKERFILE\n dockerfile_components = [dockerfile_preamble, DOCKERFILE_INSTALL_APP]\n if self.runtime == 'python-compat':\n dockerfile_components.append(DOCKERFILE_INSTALL_REQUIREMENTS_TXT)\n dockerfile_contents = ''.join((c for c in dockerfile_components))\n dockerfile = ext_runtime.GeneratedFile(dockerfile_name, dockerfile_contents)\n all_config_files.append(dockerfile)\n dockerignore = ext_runtime.GeneratedFile('.dockerignore', DOCKERIGNORE)\n all_config_files.append(dockerignore)\n return all_config_files\n<|end_body_2|>\n\n<|body_start_3|>\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n created = False\n if self.params.custom or self.params.deploy:\n dockerfiles = self.GenerateDockerfileData()\n for dockerfile in dockerfiles:\n if dockerfile.WriteTo(self.root, notify):\n created = True\n if not created:\n notify('All config files already exist, not generating anything.')\n return created\n<|end_body_3|>\n\n<|body_start_4|>\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n if not (self.params.custom or self.params.deploy):\n return []\n all_config_files = self.GenerateDockerfileData()\n return [f for f in all_config_files if not os.path.exists(os.path.join(self.root, f.filename))]\n<|end_body_4|>\n", "class_docstring": "Generates configuration for a Python application.", "class_name": "PythonConfigurator", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PythonConfigurator:\n \"\"\"Generates configuration for a Python application.\"\"\"\n\n def __init__(self, path, params, runtime):\n \"\"\"Constructor. Args: path: (str) Root path of the source tree. params: (ext_runtime.Params) Parameters passed through to the fingerprinters. runtime: (str) The runtime name.\"\"\"\n <|body_0|>\n\n def GenerateAppYaml(self, notify):\n \"\"\"Generate app.yaml. Args: notify: depending on whether we're in deploy, write messages to the user or to log. Returns: (bool) True if file was written Note: this is not a recommended use-case, python-compat users likely have an existing app.yaml. But users can still get here with the --runtime flag.\"\"\"\n <|body_1|>\n\n def GenerateDockerfileData(self):\n \"\"\"Generates dockerfiles. Returns: list(ext_runtime.GeneratedFile) the list of generated dockerfiles\"\"\"\n <|body_2|>\n\n def GenerateConfigs(self):\n \"\"\"Generate all config files for the module.\"\"\"\n <|body_3|>\n\n def GenerateConfigData(self):\n \"\"\"Generate all config files for the module. Returns: list(ext_runtime.GeneratedFile) A list of the config files that were generated\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.root = path\n self.params = params\n self.runtime = runtime\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.params.appinfo:\n app_yaml = os.path.join(self.root, 'app.yaml')\n if not os.path.exists(app_yaml):\n notify('Writing [app.yaml] to [%s].' % self.root)\n runtime = 'custom' if self.params.custom else self.runtime\n with open(app_yaml, 'w') as f:\n f.write(PYTHON_APP_YAML.format(runtime=runtime))\n log.warn(APP_YAML_WARNING)\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if self.runtime == 'python-compat':\n dockerfile_preamble = COMPAT_DOCKERFILE_PREAMBLE\n else:\n dockerfile_preamble = PYTHON27_DOCKERFILE_PREAMBLE\n all_config_files = []\n dockerfile_name = config.DOCKERFILE\n dockerfile_components = [dockerfile_preamble, DOCKERFILE_INSTALL_APP]\n if self.runtime == 'python-compat':\n dockerfile_components.append(DOCKERFILE_INSTALL_REQUIREMENTS_TXT)\n dockerfile_contents = ''.join((c for c in dockerfile_components))\n dockerfile = ext_runtime.GeneratedFile(dockerfile_name, dockerfile_contents)\n all_config_files.append(dockerfile)\n dockerignore = ext_runtime.GeneratedFile('.dockerignore', DOCKERIGNORE)\n all_config_files.append(dockerignore)\n return all_config_files\n<|end_body_2|>\n\n<|body_start_3|>\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n created = False\n if self.params.custom or self.params.deploy:\n dockerfiles = self.GenerateDockerfileData()\n for dockerfile in dockerfiles:\n if dockerfile.WriteTo(self.root, notify):\n created = True\n if not created:\n notify('All config files already exist, not generating anything.')\n return created\n<|end_body_3|>\n\n<|body_start_4|>\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n if not (self.params.custom or self.params.deploy):\n return []\n all_config_files = self.GenerateDockerfileData()\n return [f for f in all_config_files if not os.path.exists(os.path.join(self.root, f.filename))]\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000168", "length_bytes": 6335, "license_type": "permissive", "methods": [{"docstring": "Constructor. Args: path: (str) Root path of the source tree. params: (ext_runtime.Params) Parameters passed through to the fingerprinters. runtime: (str) The runtime name.", "name": "__init__", "signature": "def __init__(self, path, params, runtime)"}, {"docstring": "Generate app.yaml. Args: notify: depending on whether we're in deploy, write messages to the user or to log. Returns: (bool) True if file was written Note: this is not a recommended use-case, python-compat users likely have an existing app.yaml. But users can still get here with the --runtime flag.", "name": "GenerateAppYaml", "signature": "def GenerateAppYaml(self, notify)"}, {"docstring": "Generates dockerfiles. Returns: list(ext_runtime.GeneratedFile) the list of generated dockerfiles", "name": "GenerateDockerfileData", "signature": "def GenerateDockerfileData(self)"}, {"docstring": "Generate all config files for the module.", "name": "GenerateConfigs", "signature": "def GenerateConfigs(self)"}, {"docstring": "Generate all config files for the module. Returns: list(ext_runtime.GeneratedFile) A list of the config files that were generated", "name": "GenerateConfigData", "signature": "def GenerateConfigData(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_044942", "prompt": "Implement the Python class `PythonConfigurator` described below.\n\nClass description:\nGenerates configuration for a Python application.\n\nMethod signatures and docstrings:\n- def __init__(self, path, params, runtime): Constructor. Args: path: (str) Root path of the source tree. params: (ext_runtime.Params) Parameters passed through to the fingerprinters. runtime: (str) The runtime name.\n- def GenerateAppYaml(self, notify): Generate app.yaml. Args: notify: depending on whether we're in deploy, write messages to the user or to log. Returns: (bool) True if file was written Note: this is not a recommended use-case, python-compat users likely have an existing app.yaml. But users can still get here with the --runtime flag.\n- def GenerateDockerfileData(self): Generates dockerfiles. Returns: list(ext_runtime.GeneratedFile) the list of generated dockerfiles\n- def GenerateConfigs(self): Generate all config files for the module.\n- def GenerateConfigData(self): Generate all config files for the module. Returns: list(ext_runtime.GeneratedFile) A list of the config files that were generated", "prompted_full_text": "Implement the Python class `PythonConfigurator` described below.\n\nClass description:\nGenerates configuration for a Python application.\n\nMethod signatures and docstrings:\n- def __init__(self, path, params, runtime): Constructor. Args: path: (str) Root path of the source tree. params: (ext_runtime.Params) Parameters passed through to the fingerprinters. runtime: (str) The runtime name.\n- def GenerateAppYaml(self, notify): Generate app.yaml. Args: notify: depending on whether we're in deploy, write messages to the user or to log. Returns: (bool) True if file was written Note: this is not a recommended use-case, python-compat users likely have an existing app.yaml. But users can still get here with the --runtime flag.\n- def GenerateDockerfileData(self): Generates dockerfiles. Returns: list(ext_runtime.GeneratedFile) the list of generated dockerfiles\n- def GenerateConfigs(self): Generate all config files for the module.\n- def GenerateConfigData(self): Generate all config files for the module. Returns: list(ext_runtime.GeneratedFile) A list of the config files that were generated\n\n<|skeleton|>\nclass PythonConfigurator:\n \"\"\"Generates configuration for a Python application.\"\"\"\n\n def __init__(self, path, params, runtime):\n \"\"\"Constructor. Args: path: (str) Root path of the source tree. params: (ext_runtime.Params) Parameters passed through to the fingerprinters. runtime: (str) The runtime name.\"\"\"\n <|body_0|>\n\n def GenerateAppYaml(self, notify):\n \"\"\"Generate app.yaml. Args: notify: depending on whether we're in deploy, write messages to the user or to log. Returns: (bool) True if file was written Note: this is not a recommended use-case, python-compat users likely have an existing app.yaml. But users can still get here with the --runtime flag.\"\"\"\n <|body_1|>\n\n def GenerateDockerfileData(self):\n \"\"\"Generates dockerfiles. Returns: list(ext_runtime.GeneratedFile) the list of generated dockerfiles\"\"\"\n <|body_2|>\n\n def GenerateConfigs(self):\n \"\"\"Generate all config files for the module.\"\"\"\n <|body_3|>\n\n def GenerateConfigData(self):\n \"\"\"Generate all config files for the module. Returns: list(ext_runtime.GeneratedFile) A list of the config files that were generated\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.root = path\n self.params = params\n self.runtime = runtime\n<|end_body_0|>\n\n<|body_start_1|>\n if not self.params.appinfo:\n app_yaml = os.path.join(self.root, 'app.yaml')\n if not os.path.exists(app_yaml):\n notify('Writing [app.yaml] to [%s].' % self.root)\n runtime = 'custom' if self.params.custom else self.runtime\n with open(app_yaml, 'w') as f:\n f.write(PYTHON_APP_YAML.format(runtime=runtime))\n log.warn(APP_YAML_WARNING)\n return True\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if self.runtime == 'python-compat':\n dockerfile_preamble = COMPAT_DOCKERFILE_PREAMBLE\n else:\n dockerfile_preamble = PYTHON27_DOCKERFILE_PREAMBLE\n all_config_files = []\n dockerfile_name = config.DOCKERFILE\n dockerfile_components = [dockerfile_preamble, DOCKERFILE_INSTALL_APP]\n if self.runtime == 'python-compat':\n dockerfile_components.append(DOCKERFILE_INSTALL_REQUIREMENTS_TXT)\n dockerfile_contents = ''.join((c for c in dockerfile_components))\n dockerfile = ext_runtime.GeneratedFile(dockerfile_name, dockerfile_contents)\n all_config_files.append(dockerfile)\n dockerignore = ext_runtime.GeneratedFile('.dockerignore', DOCKERIGNORE)\n all_config_files.append(dockerignore)\n return all_config_files\n<|end_body_2|>\n\n<|body_start_3|>\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n created = False\n if self.params.custom or self.params.deploy:\n dockerfiles = self.GenerateDockerfileData()\n for dockerfile in dockerfiles:\n if dockerfile.WriteTo(self.root, notify):\n created = True\n if not created:\n notify('All config files already exist, not generating anything.')\n return created\n<|end_body_3|>\n\n<|body_start_4|>\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n if not (self.params.custom or self.params.deploy):\n return []\n all_config_files = self.GenerateDockerfileData()\n return [f for f in all_config_files if not os.path.exists(os.path.join(self.root, f.filename))]\n<|end_body_4|>\n", "revision_id": "c98b58aeb0994e011df960163541e9379ae7ea06", "skeleton": "<|skeleton|>\nclass PythonConfigurator:\n \"\"\"Generates configuration for a Python application.\"\"\"\n\n def __init__(self, path, params, runtime):\n \"\"\"Constructor. Args: path: (str) Root path of the source tree. params: (ext_runtime.Params) Parameters passed through to the fingerprinters. runtime: (str) The runtime name.\"\"\"\n <|body_0|>\n\n def GenerateAppYaml(self, notify):\n \"\"\"Generate app.yaml. Args: notify: depending on whether we're in deploy, write messages to the user or to log. Returns: (bool) True if file was written Note: this is not a recommended use-case, python-compat users likely have an existing app.yaml. But users can still get here with the --runtime flag.\"\"\"\n <|body_1|>\n\n def GenerateDockerfileData(self):\n \"\"\"Generates dockerfiles. Returns: list(ext_runtime.GeneratedFile) the list of generated dockerfiles\"\"\"\n <|body_2|>\n\n def GenerateConfigs(self):\n \"\"\"Generate all config files for the module.\"\"\"\n <|body_3|>\n\n def GenerateConfigData(self):\n \"\"\"Generate all config files for the module. Returns: list(ext_runtime.GeneratedFile) A list of the config files that were generated\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PythonConfigurator:\n \"\"\"Generates configuration for a Python application.\"\"\"\n\n def __init__(self, path, params, runtime):\n \"\"\"Constructor. Args: path: (str) Root path of the source tree. params: (ext_runtime.Params) Parameters passed through to the fingerprinters. runtime: (str) The runtime name.\"\"\"\n self.root = path\n self.params = params\n self.runtime = runtime\n\n def GenerateAppYaml(self, notify):\n \"\"\"Generate app.yaml. Args: notify: depending on whether we're in deploy, write messages to the user or to log. Returns: (bool) True if file was written Note: this is not a recommended use-case, python-compat users likely have an existing app.yaml. But users can still get here with the --runtime flag.\"\"\"\n if not self.params.appinfo:\n app_yaml = os.path.join(self.root, 'app.yaml')\n if not os.path.exists(app_yaml):\n notify('Writing [app.yaml] to [%s].' % self.root)\n runtime = 'custom' if self.params.custom else self.runtime\n with open(app_yaml, 'w') as f:\n f.write(PYTHON_APP_YAML.format(runtime=runtime))\n log.warn(APP_YAML_WARNING)\n return True\n return False\n\n def GenerateDockerfileData(self):\n \"\"\"Generates dockerfiles. Returns: list(ext_runtime.GeneratedFile) the list of generated dockerfiles\"\"\"\n if self.runtime == 'python-compat':\n dockerfile_preamble = COMPAT_DOCKERFILE_PREAMBLE\n else:\n dockerfile_preamble = PYTHON27_DOCKERFILE_PREAMBLE\n all_config_files = []\n dockerfile_name = config.DOCKERFILE\n dockerfile_components = [dockerfile_preamble, DOCKERFILE_INSTALL_APP]\n if self.runtime == 'python-compat':\n dockerfile_components.append(DOCKERFILE_INSTALL_REQUIREMENTS_TXT)\n dockerfile_contents = ''.join((c for c in dockerfile_components))\n dockerfile = ext_runtime.GeneratedFile(dockerfile_name, dockerfile_contents)\n all_config_files.append(dockerfile)\n dockerignore = ext_runtime.GeneratedFile('.dockerignore', DOCKERIGNORE)\n all_config_files.append(dockerignore)\n return all_config_files\n\n def GenerateConfigs(self):\n \"\"\"Generate all config files for the module.\"\"\"\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n created = False\n if self.params.custom or self.params.deploy:\n dockerfiles = self.GenerateDockerfileData()\n for dockerfile in dockerfiles:\n if dockerfile.WriteTo(self.root, notify):\n created = True\n if not created:\n notify('All config files already exist, not generating anything.')\n return created\n\n def GenerateConfigData(self):\n \"\"\"Generate all config files for the module. Returns: list(ext_runtime.GeneratedFile) A list of the config files that were generated\"\"\"\n notify = log.info if self.params.deploy else log.status.Print\n self.GenerateAppYaml(notify)\n if not (self.params.custom or self.params.deploy):\n return []\n all_config_files = self.GenerateDockerfileData()\n return [f for f in all_config_files if not os.path.exists(os.path.join(self.root, f.filename))]\n", "source": "the_stack_v2_python_sparse", "source_path": "google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/app/runtimes/python_compat.py", "source_repo": "KaranToor/MA450", "split": "test", "star_events_count": 1} {"blob_id": "b1e2cdfe2b949699a12020eaf0d2c27725641ff7", "bodies": ["self._amps = 415\nself._voltage = 12\nself._max_capacity = self._amps * self._voltage * 3600\nself._current_capacity = float(0)\nself._charge_history = list()", "if self._current_capacity - w < 0:\n self._charge_history.append(self._current_capacity)\n return 0\nelse:\n self._current_capacity -= w\n self._charge_history.append(self._current_capacity)\n return 1", "if self._current_capacity + w >= self._max_capacity:\n self._charge_history.append(self._current_capacity)\n return 0\nelse:\n self._current_capacity += w\n self._charge_history.append(self._current_capacity)\n return 1", "if step is not None:\n return self._charge_history[int(step)]\nreturn self._current_capacity"], "bodies_text": "<|body_start_0|>\n self._amps = 415\n self._voltage = 12\n self._max_capacity = self._amps * self._voltage * 3600\n self._current_capacity = float(0)\n self._charge_history = list()\n<|end_body_0|>\n\n<|body_start_1|>\n if self._current_capacity - w < 0:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity -= w\n self._charge_history.append(self._current_capacity)\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self._current_capacity + w >= self._max_capacity:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity += w\n self._charge_history.append(self._current_capacity)\n return 1\n<|end_body_2|>\n\n<|body_start_3|>\n if step is not None:\n return self._charge_history[int(step)]\n return self._current_capacity\n<|end_body_3|>\n", "class_docstring": "Electrical Storage System, holds charges of backup power", "class_name": "ElectricalStorage", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ElectricalStorage:\n \"\"\"Electrical Storage System, holds charges of backup power\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Constructor for electrical storage system\"\"\"\n <|body_0|>\n\n def discharge(self, w) -> int:\n \"\"\"Discharge w Watts from the battery :param w: Watts to remove from the battery :type w: float :return: 1 if the battery was successfully discharged, 0 otherwise\"\"\"\n <|body_1|>\n\n def charge(self, w) -> int:\n \"\"\"Add w Watts to the battery :param w: Watts to add to the battery :type w: float :return: 1 if the battery was successfully charged, 0 otherwise\"\"\"\n <|body_2|>\n\n def current_charge(self, step=None) -> int:\n \"\"\"Returns the charge of the battery, either at the current step or at a specified step :param step: Step to evaluate. Default None :type step: int :return: Charge of electrical system at step\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._amps = 415\n self._voltage = 12\n self._max_capacity = self._amps * self._voltage * 3600\n self._current_capacity = float(0)\n self._charge_history = list()\n<|end_body_0|>\n\n<|body_start_1|>\n if self._current_capacity - w < 0:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity -= w\n self._charge_history.append(self._current_capacity)\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self._current_capacity + w >= self._max_capacity:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity += w\n self._charge_history.append(self._current_capacity)\n return 1\n<|end_body_2|>\n\n<|body_start_3|>\n if step is not None:\n return self._charge_history[int(step)]\n return self._current_capacity\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000169", "length_bytes": 2634, "license_type": "no_license", "methods": [{"docstring": "Constructor for electrical storage system", "name": "__init__", "signature": "def __init__(self) -> None"}, {"docstring": "Discharge w Watts from the battery :param w: Watts to remove from the battery :type w: float :return: 1 if the battery was successfully discharged, 0 otherwise", "name": "discharge", "signature": "def discharge(self, w) -> int"}, {"docstring": "Add w Watts to the battery :param w: Watts to add to the battery :type w: float :return: 1 if the battery was successfully charged, 0 otherwise", "name": "charge", "signature": "def charge(self, w) -> int"}, {"docstring": "Returns the charge of the battery, either at the current step or at a specified step :param step: Step to evaluate. Default None :type step: int :return: Charge of electrical system at step", "name": "current_charge", "signature": "def current_charge(self, step=None) -> int"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_027607", "prompt": "Implement the Python class `ElectricalStorage` described below.\n\nClass description:\nElectrical Storage System, holds charges of backup power\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Constructor for electrical storage system\n- def discharge(self, w) -> int: Discharge w Watts from the battery :param w: Watts to remove from the battery :type w: float :return: 1 if the battery was successfully discharged, 0 otherwise\n- def charge(self, w) -> int: Add w Watts to the battery :param w: Watts to add to the battery :type w: float :return: 1 if the battery was successfully charged, 0 otherwise\n- def current_charge(self, step=None) -> int: Returns the charge of the battery, either at the current step or at a specified step :param step: Step to evaluate. Default None :type step: int :return: Charge of electrical system at step", "prompted_full_text": "Implement the Python class `ElectricalStorage` described below.\n\nClass description:\nElectrical Storage System, holds charges of backup power\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Constructor for electrical storage system\n- def discharge(self, w) -> int: Discharge w Watts from the battery :param w: Watts to remove from the battery :type w: float :return: 1 if the battery was successfully discharged, 0 otherwise\n- def charge(self, w) -> int: Add w Watts to the battery :param w: Watts to add to the battery :type w: float :return: 1 if the battery was successfully charged, 0 otherwise\n- def current_charge(self, step=None) -> int: Returns the charge of the battery, either at the current step or at a specified step :param step: Step to evaluate. Default None :type step: int :return: Charge of electrical system at step\n\n<|skeleton|>\nclass ElectricalStorage:\n \"\"\"Electrical Storage System, holds charges of backup power\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Constructor for electrical storage system\"\"\"\n <|body_0|>\n\n def discharge(self, w) -> int:\n \"\"\"Discharge w Watts from the battery :param w: Watts to remove from the battery :type w: float :return: 1 if the battery was successfully discharged, 0 otherwise\"\"\"\n <|body_1|>\n\n def charge(self, w) -> int:\n \"\"\"Add w Watts to the battery :param w: Watts to add to the battery :type w: float :return: 1 if the battery was successfully charged, 0 otherwise\"\"\"\n <|body_2|>\n\n def current_charge(self, step=None) -> int:\n \"\"\"Returns the charge of the battery, either at the current step or at a specified step :param step: Step to evaluate. Default None :type step: int :return: Charge of electrical system at step\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._amps = 415\n self._voltage = 12\n self._max_capacity = self._amps * self._voltage * 3600\n self._current_capacity = float(0)\n self._charge_history = list()\n<|end_body_0|>\n\n<|body_start_1|>\n if self._current_capacity - w < 0:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity -= w\n self._charge_history.append(self._current_capacity)\n return 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self._current_capacity + w >= self._max_capacity:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity += w\n self._charge_history.append(self._current_capacity)\n return 1\n<|end_body_2|>\n\n<|body_start_3|>\n if step is not None:\n return self._charge_history[int(step)]\n return self._current_capacity\n<|end_body_3|>\n", "revision_id": "e2591a9c373bb891d17aacdfdbce0280d58c21c8", "skeleton": "<|skeleton|>\nclass ElectricalStorage:\n \"\"\"Electrical Storage System, holds charges of backup power\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Constructor for electrical storage system\"\"\"\n <|body_0|>\n\n def discharge(self, w) -> int:\n \"\"\"Discharge w Watts from the battery :param w: Watts to remove from the battery :type w: float :return: 1 if the battery was successfully discharged, 0 otherwise\"\"\"\n <|body_1|>\n\n def charge(self, w) -> int:\n \"\"\"Add w Watts to the battery :param w: Watts to add to the battery :type w: float :return: 1 if the battery was successfully charged, 0 otherwise\"\"\"\n <|body_2|>\n\n def current_charge(self, step=None) -> int:\n \"\"\"Returns the charge of the battery, either at the current step or at a specified step :param step: Step to evaluate. Default None :type step: int :return: Charge of electrical system at step\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ElectricalStorage:\n \"\"\"Electrical Storage System, holds charges of backup power\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Constructor for electrical storage system\"\"\"\n self._amps = 415\n self._voltage = 12\n self._max_capacity = self._amps * self._voltage * 3600\n self._current_capacity = float(0)\n self._charge_history = list()\n\n def discharge(self, w) -> int:\n \"\"\"Discharge w Watts from the battery :param w: Watts to remove from the battery :type w: float :return: 1 if the battery was successfully discharged, 0 otherwise\"\"\"\n if self._current_capacity - w < 0:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity -= w\n self._charge_history.append(self._current_capacity)\n return 1\n\n def charge(self, w) -> int:\n \"\"\"Add w Watts to the battery :param w: Watts to add to the battery :type w: float :return: 1 if the battery was successfully charged, 0 otherwise\"\"\"\n if self._current_capacity + w >= self._max_capacity:\n self._charge_history.append(self._current_capacity)\n return 0\n else:\n self._current_capacity += w\n self._charge_history.append(self._current_capacity)\n return 1\n\n def current_charge(self, step=None) -> int:\n \"\"\"Returns the charge of the battery, either at the current step or at a specified step :param step: Step to evaluate. Default None :type step: int :return: Charge of electrical system at step\"\"\"\n if step is not None:\n return self._charge_history[int(step)]\n return self._current_capacity\n", "source": "the_stack_v2_python_sparse", "source_path": "es.py", "source_repo": "osdiallo/smart-neighborhood-simulation", "split": "test", "star_events_count": 0} {"blob_id": "0b0c1e8a86731cba2b2a2f1bbf78b24f0e618973", "bodies": ["pro_lists = Project.objects.all()\ncontent = {'pro_lists': pro_lists}\nreturn render(requests, 'project_env/env_add.html', content)", "logger.info('环境添加的参数:%s' % requests.POST)\nenv_url = requests.POST.get('env_url')\nenv = Environment.objects.filter(url=env_url)\nenv_name = requests.POST.get('env_name')\ndescription = requests.POST.get('description')\nprj_id = requests.POST.get('prj_id')\npro_lists = Project.objects.filter(prj_id=prj_id)\nenv_status = requests.POST.get('env_status')\nif env:\n messages.error(requests, '环境地址host重复了')\nelse:\n envs = Environment(env_name=env_name, url=env_url, description=description, status=env_status, project=pro_lists[0])\n envs.save()\n logger.info('新增环境完成!!!')\n return redirect(reverse('project_env:get_env', kwargs={'page_num': 1}))\nreturn render(requests, 'project_env/env_add.html', {'env_name': env_name, 'description': description, 'pro_lists': pro_lists, 'env_url': env_url, 'env_status': env_status, 'message': '该环境地址库中已经存在'})"], "bodies_text": "<|body_start_0|>\n pro_lists = Project.objects.all()\n content = {'pro_lists': pro_lists}\n return render(requests, 'project_env/env_add.html', content)\n<|end_body_0|>\n\n<|body_start_1|>\n logger.info('环境添加的参数:%s' % requests.POST)\n env_url = requests.POST.get('env_url')\n env = Environment.objects.filter(url=env_url)\n env_name = requests.POST.get('env_name')\n description = requests.POST.get('description')\n prj_id = requests.POST.get('prj_id')\n pro_lists = Project.objects.filter(prj_id=prj_id)\n env_status = requests.POST.get('env_status')\n if env:\n messages.error(requests, '环境地址host重复了')\n else:\n envs = Environment(env_name=env_name, url=env_url, description=description, status=env_status, project=pro_lists[0])\n envs.save()\n logger.info('新增环境完成!!!')\n return redirect(reverse('project_env:get_env', kwargs={'page_num': 1}))\n return render(requests, 'project_env/env_add.html', {'env_name': env_name, 'description': description, 'pro_lists': pro_lists, 'env_url': env_url, 'env_status': env_status, 'message': '该环境地址库中已经存在'})\n<|end_body_1|>\n", "class_docstring": "", "class_name": "add_env", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass add_env:\n\n def get(self, requests):\n \"\"\"获取添加环境页面\"\"\"\n <|body_0|>\n\n def post(self, requests):\n \"\"\"添加环境\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pro_lists = Project.objects.all()\n content = {'pro_lists': pro_lists}\n return render(requests, 'project_env/env_add.html', content)\n<|end_body_0|>\n\n<|body_start_1|>\n logger.info('环境添加的参数:%s' % requests.POST)\n env_url = requests.POST.get('env_url')\n env = Environment.objects.filter(url=env_url)\n env_name = requests.POST.get('env_name')\n description = requests.POST.get('description')\n prj_id = requests.POST.get('prj_id')\n pro_lists = Project.objects.filter(prj_id=prj_id)\n env_status = requests.POST.get('env_status')\n if env:\n messages.error(requests, '环境地址host重复了')\n else:\n envs = Environment(env_name=env_name, url=env_url, description=description, status=env_status, project=pro_lists[0])\n envs.save()\n logger.info('新增环境完成!!!')\n return redirect(reverse('project_env:get_env', kwargs={'page_num': 1}))\n return render(requests, 'project_env/env_add.html', {'env_name': env_name, 'description': description, 'pro_lists': pro_lists, 'env_url': env_url, 'env_status': env_status, 'message': '该环境地址库中已经存在'})\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000170", "length_bytes": 10476, "license_type": "no_license", "methods": [{"docstring": "获取添加环境页面", "name": "get", "signature": "def get(self, requests)"}, {"docstring": "添加环境", "name": "post", "signature": "def post(self, requests)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036930", "prompt": "Implement the Python class `add_env` described below.\n\nClass description:\nImplement the add_env class.\n\nMethod signatures and docstrings:\n- def get(self, requests): 获取添加环境页面\n- def post(self, requests): 添加环境", "prompted_full_text": "Implement the Python class `add_env` described below.\n\nClass description:\nImplement the add_env class.\n\nMethod signatures and docstrings:\n- def get(self, requests): 获取添加环境页面\n- def post(self, requests): 添加环境\n\n<|skeleton|>\nclass add_env:\n\n def get(self, requests):\n \"\"\"获取添加环境页面\"\"\"\n <|body_0|>\n\n def post(self, requests):\n \"\"\"添加环境\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pro_lists = Project.objects.all()\n content = {'pro_lists': pro_lists}\n return render(requests, 'project_env/env_add.html', content)\n<|end_body_0|>\n\n<|body_start_1|>\n logger.info('环境添加的参数:%s' % requests.POST)\n env_url = requests.POST.get('env_url')\n env = Environment.objects.filter(url=env_url)\n env_name = requests.POST.get('env_name')\n description = requests.POST.get('description')\n prj_id = requests.POST.get('prj_id')\n pro_lists = Project.objects.filter(prj_id=prj_id)\n env_status = requests.POST.get('env_status')\n if env:\n messages.error(requests, '环境地址host重复了')\n else:\n envs = Environment(env_name=env_name, url=env_url, description=description, status=env_status, project=pro_lists[0])\n envs.save()\n logger.info('新增环境完成!!!')\n return redirect(reverse('project_env:get_env', kwargs={'page_num': 1}))\n return render(requests, 'project_env/env_add.html', {'env_name': env_name, 'description': description, 'pro_lists': pro_lists, 'env_url': env_url, 'env_status': env_status, 'message': '该环境地址库中已经存在'})\n<|end_body_1|>\n", "revision_id": "c26d8a49bdcc309cca377639a72c67d5ff06b67e", "skeleton": "<|skeleton|>\nclass add_env:\n\n def get(self, requests):\n \"\"\"获取添加环境页面\"\"\"\n <|body_0|>\n\n def post(self, requests):\n \"\"\"添加环境\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class add_env:\n def get(self, requests):\n \"\"\"获取添加环境页面\"\"\"\n pro_lists = Project.objects.all()\n content = {'pro_lists': pro_lists}\n return render(requests, 'project_env/env_add.html', content)\n\n def post(self, requests):\n \"\"\"添加环境\"\"\"\n logger.info('环境添加的参数:%s' % requests.POST)\n env_url = requests.POST.get('env_url')\n env = Environment.objects.filter(url=env_url)\n env_name = requests.POST.get('env_name')\n description = requests.POST.get('description')\n prj_id = requests.POST.get('prj_id')\n pro_lists = Project.objects.filter(prj_id=prj_id)\n env_status = requests.POST.get('env_status')\n if env:\n messages.error(requests, '环境地址host重复了')\n else:\n envs = Environment(env_name=env_name, url=env_url, description=description, status=env_status, project=pro_lists[0])\n envs.save()\n logger.info('新增环境完成!!!')\n return redirect(reverse('project_env:get_env', kwargs={'page_num': 1}))\n return render(requests, 'project_env/env_add.html', {'env_name': env_name, 'description': description, 'pro_lists': pro_lists, 'env_url': env_url, 'env_status': env_status, 'message': '该环境地址库中已经存在'})\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/project_env/views.py", "source_repo": "wanyafei/AutoTestform", "split": "test", "star_events_count": 1} {"blob_id": "975d4681c830baa98c7ac17d141f53ca35064eab", "bodies": ["log_title = 'Command log of {0}'.format(self.name)\ninput_title = 'Command to {0}'.format(self.name)\nlog_config = nuqql.config.get('log_win')\nself.wins.log_win = nuqql.win.LogWin(log_config, self, log_title)\nself.wins.log_win.list = self.history.log\ninput_config = nuqql.config.get('input_win')\nself.wins.input_win = nuqql.win.InputWin(input_config, self, input_title)", "if self.notification > 0:\n notify = '# '\nelse:\n notify = ''\nreturn '{0}{{backend}} {1}'.format(notify, self.name)", "sort_notify = 0 - self.notification\nsort_type = 0\nsort_status = 0\nsort_name = self.name\nsort_type = 1\nreturn (sort_notify, sort_type, sort_status, sort_name)", "tstamp = datetime.datetime.now()\nlog_msg = nuqql.history.LogMessage(tstamp, 'you', msg, own=True)\nself.wins.log_win.add(log_msg)\nif self.backend is not None:\n self.backend.client.send_command(msg)"], "bodies_text": "<|body_start_0|>\n log_title = 'Command log of {0}'.format(self.name)\n input_title = 'Command to {0}'.format(self.name)\n log_config = nuqql.config.get('log_win')\n self.wins.log_win = nuqql.win.LogWin(log_config, self, log_title)\n self.wins.log_win.list = self.history.log\n input_config = nuqql.config.get('input_win')\n self.wins.input_win = nuqql.win.InputWin(input_config, self, input_title)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.notification > 0:\n notify = '# '\n else:\n notify = ''\n return '{0}{{backend}} {1}'.format(notify, self.name)\n<|end_body_1|>\n\n<|body_start_2|>\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n sort_type = 1\n return (sort_notify, sort_type, sort_status, sort_name)\n<|end_body_2|>\n\n<|body_start_3|>\n tstamp = datetime.datetime.now()\n log_msg = nuqql.history.LogMessage(tstamp, 'you', msg, own=True)\n self.wins.log_win.add(log_msg)\n if self.backend is not None:\n self.backend.client.send_command(msg)\n<|end_body_3|>\n", "class_docstring": "Class for backend conversations", "class_name": "BackendConversation", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BackendConversation:\n \"\"\"Class for backend conversations\"\"\"\n\n def create_windows(self):\n \"\"\"Create windows for this conversation\"\"\"\n <|body_0|>\n\n def get_name(self):\n \"\"\"Get the name of the conversation, depending on type\"\"\"\n <|body_1|>\n\n def get_key(self):\n \"\"\"Get a key for sorting this conversation\"\"\"\n <|body_2|>\n\n def send_msg(self, msg):\n \"\"\"Send message coming from the UI/input window\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log_title = 'Command log of {0}'.format(self.name)\n input_title = 'Command to {0}'.format(self.name)\n log_config = nuqql.config.get('log_win')\n self.wins.log_win = nuqql.win.LogWin(log_config, self, log_title)\n self.wins.log_win.list = self.history.log\n input_config = nuqql.config.get('input_win')\n self.wins.input_win = nuqql.win.InputWin(input_config, self, input_title)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.notification > 0:\n notify = '# '\n else:\n notify = ''\n return '{0}{{backend}} {1}'.format(notify, self.name)\n<|end_body_1|>\n\n<|body_start_2|>\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n sort_type = 1\n return (sort_notify, sort_type, sort_status, sort_name)\n<|end_body_2|>\n\n<|body_start_3|>\n tstamp = datetime.datetime.now()\n log_msg = nuqql.history.LogMessage(tstamp, 'you', msg, own=True)\n self.wins.log_win.add(log_msg)\n if self.backend is not None:\n self.backend.client.send_command(msg)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000171", "length_bytes": 18182, "license_type": "permissive", "methods": [{"docstring": "Create windows for this conversation", "name": "create_windows", "signature": "def create_windows(self)"}, {"docstring": "Get the name of the conversation, depending on type", "name": "get_name", "signature": "def get_name(self)"}, {"docstring": "Get a key for sorting this conversation", "name": "get_key", "signature": "def get_key(self)"}, {"docstring": "Send message coming from the UI/input window", "name": "send_msg", "signature": "def send_msg(self, msg)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_035697", "prompt": "Implement the Python class `BackendConversation` described below.\n\nClass description:\nClass for backend conversations\n\nMethod signatures and docstrings:\n- def create_windows(self): Create windows for this conversation\n- def get_name(self): Get the name of the conversation, depending on type\n- def get_key(self): Get a key for sorting this conversation\n- def send_msg(self, msg): Send message coming from the UI/input window", "prompted_full_text": "Implement the Python class `BackendConversation` described below.\n\nClass description:\nClass for backend conversations\n\nMethod signatures and docstrings:\n- def create_windows(self): Create windows for this conversation\n- def get_name(self): Get the name of the conversation, depending on type\n- def get_key(self): Get a key for sorting this conversation\n- def send_msg(self, msg): Send message coming from the UI/input window\n\n<|skeleton|>\nclass BackendConversation:\n \"\"\"Class for backend conversations\"\"\"\n\n def create_windows(self):\n \"\"\"Create windows for this conversation\"\"\"\n <|body_0|>\n\n def get_name(self):\n \"\"\"Get the name of the conversation, depending on type\"\"\"\n <|body_1|>\n\n def get_key(self):\n \"\"\"Get a key for sorting this conversation\"\"\"\n <|body_2|>\n\n def send_msg(self, msg):\n \"\"\"Send message coming from the UI/input window\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n log_title = 'Command log of {0}'.format(self.name)\n input_title = 'Command to {0}'.format(self.name)\n log_config = nuqql.config.get('log_win')\n self.wins.log_win = nuqql.win.LogWin(log_config, self, log_title)\n self.wins.log_win.list = self.history.log\n input_config = nuqql.config.get('input_win')\n self.wins.input_win = nuqql.win.InputWin(input_config, self, input_title)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.notification > 0:\n notify = '# '\n else:\n notify = ''\n return '{0}{{backend}} {1}'.format(notify, self.name)\n<|end_body_1|>\n\n<|body_start_2|>\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n sort_type = 1\n return (sort_notify, sort_type, sort_status, sort_name)\n<|end_body_2|>\n\n<|body_start_3|>\n tstamp = datetime.datetime.now()\n log_msg = nuqql.history.LogMessage(tstamp, 'you', msg, own=True)\n self.wins.log_win.add(log_msg)\n if self.backend is not None:\n self.backend.client.send_command(msg)\n<|end_body_3|>\n", "revision_id": "c0142b207115a9a225970fb0e1d38092ba85ae1d", "skeleton": "<|skeleton|>\nclass BackendConversation:\n \"\"\"Class for backend conversations\"\"\"\n\n def create_windows(self):\n \"\"\"Create windows for this conversation\"\"\"\n <|body_0|>\n\n def get_name(self):\n \"\"\"Get the name of the conversation, depending on type\"\"\"\n <|body_1|>\n\n def get_key(self):\n \"\"\"Get a key for sorting this conversation\"\"\"\n <|body_2|>\n\n def send_msg(self, msg):\n \"\"\"Send message coming from the UI/input window\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BackendConversation:\n \"\"\"Class for backend conversations\"\"\"\n\n def create_windows(self):\n \"\"\"Create windows for this conversation\"\"\"\n log_title = 'Command log of {0}'.format(self.name)\n input_title = 'Command to {0}'.format(self.name)\n log_config = nuqql.config.get('log_win')\n self.wins.log_win = nuqql.win.LogWin(log_config, self, log_title)\n self.wins.log_win.list = self.history.log\n input_config = nuqql.config.get('input_win')\n self.wins.input_win = nuqql.win.InputWin(input_config, self, input_title)\n\n def get_name(self):\n \"\"\"Get the name of the conversation, depending on type\"\"\"\n if self.notification > 0:\n notify = '# '\n else:\n notify = ''\n return '{0}{{backend}} {1}'.format(notify, self.name)\n\n def get_key(self):\n \"\"\"Get a key for sorting this conversation\"\"\"\n sort_notify = 0 - self.notification\n sort_type = 0\n sort_status = 0\n sort_name = self.name\n sort_type = 1\n return (sort_notify, sort_type, sort_status, sort_name)\n\n def send_msg(self, msg):\n \"\"\"Send message coming from the UI/input window\"\"\"\n tstamp = datetime.datetime.now()\n log_msg = nuqql.history.LogMessage(tstamp, 'you', msg, own=True)\n self.wins.log_win.add(log_msg)\n if self.backend is not None:\n self.backend.client.send_command(msg)\n", "source": "the_stack_v2_python_sparse", "source_path": "nuqql/conversation.py", "source_repo": "modk/nuqql", "split": "test", "star_events_count": 0} {"blob_id": "6c2f2f0e721489b881abda5a78b757991ff10c9b", "bodies": ["prediction = self.predict(input_datum_or_data)\ncategory = np.argmax(prediction, axis=-1)\nreturn category", "category_labels = np.argmax(target_data, axis=-1)\ncorrect_prediction = self.classify(input_data) == category_labels\naccuracy = correct_prediction.mean()\nreturn accuracy"], "bodies_text": "<|body_start_0|>\n prediction = self.predict(input_datum_or_data)\n category = np.argmax(prediction, axis=-1)\n return category\n<|end_body_0|>\n\n<|body_start_1|>\n category_labels = np.argmax(target_data, axis=-1)\n correct_prediction = self.classify(input_data) == category_labels\n accuracy = correct_prediction.mean()\n return accuracy\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ClassifierModel", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClassifierModel:\n\n def classify(self, input_datum_or_data):\n \"\"\"Classifies a datum or each datum in a list of data. Args: input_datum_or_data: a 1-dimensional np.array of a single datum or a 2-dimensional np.array of data where each row is a datum. Returns: An integer (representing a label) if a single datum is passed in, or a list of integers (representing the labels) if multiple data is passed in.\"\"\"\n <|body_0|>\n\n def accuracy(self, input_data, target_data):\n \"\"\"Computes the accuracy of the model classification predictions. Args: input_data: a 2-dimensional np.array of input data where each row is a datum. target_data: a 2-dimensional np.array of correct labels where each row is a probability distribution over the labels (or alternatively, a one-hot vector representation of the label). Returns: A float, the accuracy of the model for the given data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prediction = self.predict(input_datum_or_data)\n category = np.argmax(prediction, axis=-1)\n return category\n<|end_body_0|>\n\n<|body_start_1|>\n category_labels = np.argmax(target_data, axis=-1)\n correct_prediction = self.classify(input_data) == category_labels\n accuracy = correct_prediction.mean()\n return accuracy\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000172", "length_bytes": 14064, "license_type": "no_license", "methods": [{"docstring": "Classifies a datum or each datum in a list of data. Args: input_datum_or_data: a 1-dimensional np.array of a single datum or a 2-dimensional np.array of data where each row is a datum. Returns: An integer (representing a label) if a single datum is passed in, or a list of integers (representing the labels) if multiple data is passed in.", "name": "classify", "signature": "def classify(self, input_datum_or_data)"}, {"docstring": "Computes the accuracy of the model classification predictions. Args: input_data: a 2-dimensional np.array of input data where each row is a datum. target_data: a 2-dimensional np.array of correct labels where each row is a probability distribution over the labels (or alternatively, a one-hot vector representation of the label). Returns: A float, the accuracy of the model for the given data.", "name": "accuracy", "signature": "def accuracy(self, input_data, target_data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020374", "prompt": "Implement the Python class `ClassifierModel` described below.\n\nClass description:\nImplement the ClassifierModel class.\n\nMethod signatures and docstrings:\n- def classify(self, input_datum_or_data): Classifies a datum or each datum in a list of data. Args: input_datum_or_data: a 1-dimensional np.array of a single datum or a 2-dimensional np.array of data where each row is a datum. Returns: An integer (representing a label) if a single datum is passed in, or a list of integers (representing the labels) if multiple data is passed in.\n- def accuracy(self, input_data, target_data): Computes the accuracy of the model classification predictions. Args: input_data: a 2-dimensional np.array of input data where each row is a datum. target_data: a 2-dimensional np.array of correct labels where each row is a probability distribution over the labels (or alternatively, a one-hot vector representation of the label). Returns: A float, the accuracy of the model for the given data.", "prompted_full_text": "Implement the Python class `ClassifierModel` described below.\n\nClass description:\nImplement the ClassifierModel class.\n\nMethod signatures and docstrings:\n- def classify(self, input_datum_or_data): Classifies a datum or each datum in a list of data. Args: input_datum_or_data: a 1-dimensional np.array of a single datum or a 2-dimensional np.array of data where each row is a datum. Returns: An integer (representing a label) if a single datum is passed in, or a list of integers (representing the labels) if multiple data is passed in.\n- def accuracy(self, input_data, target_data): Computes the accuracy of the model classification predictions. Args: input_data: a 2-dimensional np.array of input data where each row is a datum. target_data: a 2-dimensional np.array of correct labels where each row is a probability distribution over the labels (or alternatively, a one-hot vector representation of the label). Returns: A float, the accuracy of the model for the given data.\n\n<|skeleton|>\nclass ClassifierModel:\n\n def classify(self, input_datum_or_data):\n \"\"\"Classifies a datum or each datum in a list of data. Args: input_datum_or_data: a 1-dimensional np.array of a single datum or a 2-dimensional np.array of data where each row is a datum. Returns: An integer (representing a label) if a single datum is passed in, or a list of integers (representing the labels) if multiple data is passed in.\"\"\"\n <|body_0|>\n\n def accuracy(self, input_data, target_data):\n \"\"\"Computes the accuracy of the model classification predictions. Args: input_data: a 2-dimensional np.array of input data where each row is a datum. target_data: a 2-dimensional np.array of correct labels where each row is a probability distribution over the labels (or alternatively, a one-hot vector representation of the label). Returns: A float, the accuracy of the model for the given data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n prediction = self.predict(input_datum_or_data)\n category = np.argmax(prediction, axis=-1)\n return category\n<|end_body_0|>\n\n<|body_start_1|>\n category_labels = np.argmax(target_data, axis=-1)\n correct_prediction = self.classify(input_data) == category_labels\n accuracy = correct_prediction.mean()\n return accuracy\n<|end_body_1|>\n", "revision_id": "ae4f68b0fc06fd260a8ce450be24f52c2d6c42e4", "skeleton": "<|skeleton|>\nclass ClassifierModel:\n\n def classify(self, input_datum_or_data):\n \"\"\"Classifies a datum or each datum in a list of data. Args: input_datum_or_data: a 1-dimensional np.array of a single datum or a 2-dimensional np.array of data where each row is a datum. Returns: An integer (representing a label) if a single datum is passed in, or a list of integers (representing the labels) if multiple data is passed in.\"\"\"\n <|body_0|>\n\n def accuracy(self, input_data, target_data):\n \"\"\"Computes the accuracy of the model classification predictions. Args: input_data: a 2-dimensional np.array of input data where each row is a datum. target_data: a 2-dimensional np.array of correct labels where each row is a probability distribution over the labels (or alternatively, a one-hot vector representation of the label). Returns: A float, the accuracy of the model for the given data.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ClassifierModel:\n def classify(self, input_datum_or_data):\n \"\"\"Classifies a datum or each datum in a list of data. Args: input_datum_or_data: a 1-dimensional np.array of a single datum or a 2-dimensional np.array of data where each row is a datum. Returns: An integer (representing a label) if a single datum is passed in, or a list of integers (representing the labels) if multiple data is passed in.\"\"\"\n prediction = self.predict(input_datum_or_data)\n category = np.argmax(prediction, axis=-1)\n return category\n\n def accuracy(self, input_data, target_data):\n \"\"\"Computes the accuracy of the model classification predictions. Args: input_data: a 2-dimensional np.array of input data where each row is a datum. target_data: a 2-dimensional np.array of correct labels where each row is a probability distribution over the labels (or alternatively, a one-hot vector representation of the label). Returns: A float, the accuracy of the model for the given data.\"\"\"\n category_labels = np.argmax(target_data, axis=-1)\n correct_prediction = self.classify(input_data) == category_labels\n accuracy = correct_prediction.mean()\n return accuracy\n", "source": "the_stack_v2_python_sparse", "source_path": "RL-Berkeley/classification_sp16/models.py", "source_repo": "YuliangXiu/OpenCourse", "split": "test", "star_events_count": 12} {"blob_id": "aaa9089ba81d9668ad505d269c58188f54ded311", "bodies": ["Canvas.__init__(self)\nself.configure(width=larg, height=haut)\nself.larg, self.haut = (larg, haut)\nself.create_line(10, haut / 2, larg, haut / 2, arrow=LAST)\nself.create_line(10, haut - 5, 10, 5, arrow=LAST)\npas = (larg - 25) / 8.0\nfor t in range(1, 9):\n stx = 10 + t * pas\n self.create_line(stx, haut / 2 - 4, stx, haut / 2 + 4)", "curve = []\npas = (self.larg - 25) / 1000.0\nfor t in range(0, 1001, 5):\n e = ampl * sin(2 * pi * freq * t / 1000 - phase)\n x = 10 + t * pas\n y = self.haut / 2 - e * self.haut / 25\n curve.append((x, y))\nn = self.create_line(curve, fill=coul, smooth=1)"], "bodies_text": "<|body_start_0|>\n Canvas.__init__(self)\n self.configure(width=larg, height=haut)\n self.larg, self.haut = (larg, haut)\n self.create_line(10, haut / 2, larg, haut / 2, arrow=LAST)\n self.create_line(10, haut - 5, 10, 5, arrow=LAST)\n pas = (larg - 25) / 8.0\n for t in range(1, 9):\n stx = 10 + t * pas\n self.create_line(stx, haut / 2 - 4, stx, haut / 2 + 4)\n<|end_body_0|>\n\n<|body_start_1|>\n curve = []\n pas = (self.larg - 25) / 1000.0\n for t in range(0, 1001, 5):\n e = ampl * sin(2 * pi * freq * t / 1000 - phase)\n x = 10 + t * pas\n y = self.haut / 2 - e * self.haut / 25\n curve.append((x, y))\n n = self.create_line(curve, fill=coul, smooth=1)\n<|end_body_1|>\n", "class_docstring": "Canevas spécialisé pour dessiner des courbes élongation/temps", "class_name": "OscilloGraphe", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OscilloGraphe:\n \"\"\"Canevas spécialisé pour dessiner des courbes élongation/temps\"\"\"\n\n def __init__(self, boss=None, larg=200, haut=150):\n \"\"\"Constructeur du graphique : Axes et échelle horiz.\"\"\"\n <|body_0|>\n\n def traceCourbe(self, freq=1, phase=0, ampl=10, coul='red'):\n \"\"\"Tracé d'un graphique élongation/temps sur 1 seconde\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Canvas.__init__(self)\n self.configure(width=larg, height=haut)\n self.larg, self.haut = (larg, haut)\n self.create_line(10, haut / 2, larg, haut / 2, arrow=LAST)\n self.create_line(10, haut - 5, 10, 5, arrow=LAST)\n pas = (larg - 25) / 8.0\n for t in range(1, 9):\n stx = 10 + t * pas\n self.create_line(stx, haut / 2 - 4, stx, haut / 2 + 4)\n<|end_body_0|>\n\n<|body_start_1|>\n curve = []\n pas = (self.larg - 25) / 1000.0\n for t in range(0, 1001, 5):\n e = ampl * sin(2 * pi * freq * t / 1000 - phase)\n x = 10 + t * pas\n y = self.haut / 2 - e * self.haut / 25\n curve.append((x, y))\n n = self.create_line(curve, fill=coul, smooth=1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000173", "length_bytes": 1791, "license_type": "no_license", "methods": [{"docstring": "Constructeur du graphique : Axes et échelle horiz.", "name": "__init__", "signature": "def __init__(self, boss=None, larg=200, haut=150)"}, {"docstring": "Tracé d'un graphique élongation/temps sur 1 seconde", "name": "traceCourbe", "signature": "def traceCourbe(self, freq=1, phase=0, ampl=10, coul='red')"}], "n_methods": 2, "prompt": "Implement the Python class `OscilloGraphe` described below.\n\nClass description:\nCanevas spécialisé pour dessiner des courbes élongation/temps\n\nMethod signatures and docstrings:\n- def __init__(self, boss=None, larg=200, haut=150): Constructeur du graphique : Axes et échelle horiz.\n- def traceCourbe(self, freq=1, phase=0, ampl=10, coul='red'): Tracé d'un graphique élongation/temps sur 1 seconde", "prompted_full_text": "Implement the Python class `OscilloGraphe` described below.\n\nClass description:\nCanevas spécialisé pour dessiner des courbes élongation/temps\n\nMethod signatures and docstrings:\n- def __init__(self, boss=None, larg=200, haut=150): Constructeur du graphique : Axes et échelle horiz.\n- def traceCourbe(self, freq=1, phase=0, ampl=10, coul='red'): Tracé d'un graphique élongation/temps sur 1 seconde\n\n<|skeleton|>\nclass OscilloGraphe:\n \"\"\"Canevas spécialisé pour dessiner des courbes élongation/temps\"\"\"\n\n def __init__(self, boss=None, larg=200, haut=150):\n \"\"\"Constructeur du graphique : Axes et échelle horiz.\"\"\"\n <|body_0|>\n\n def traceCourbe(self, freq=1, phase=0, ampl=10, coul='red'):\n \"\"\"Tracé d'un graphique élongation/temps sur 1 seconde\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Canvas.__init__(self)\n self.configure(width=larg, height=haut)\n self.larg, self.haut = (larg, haut)\n self.create_line(10, haut / 2, larg, haut / 2, arrow=LAST)\n self.create_line(10, haut - 5, 10, 5, arrow=LAST)\n pas = (larg - 25) / 8.0\n for t in range(1, 9):\n stx = 10 + t * pas\n self.create_line(stx, haut / 2 - 4, stx, haut / 2 + 4)\n<|end_body_0|>\n\n<|body_start_1|>\n curve = []\n pas = (self.larg - 25) / 1000.0\n for t in range(0, 1001, 5):\n e = ampl * sin(2 * pi * freq * t / 1000 - phase)\n x = 10 + t * pas\n y = self.haut / 2 - e * self.haut / 25\n curve.append((x, y))\n n = self.create_line(curve, fill=coul, smooth=1)\n<|end_body_1|>\n", "revision_id": "f38e71ec5737ad4b727da67c320d393fbe89c06c", "skeleton": "<|skeleton|>\nclass OscilloGraphe:\n \"\"\"Canevas spécialisé pour dessiner des courbes élongation/temps\"\"\"\n\n def __init__(self, boss=None, larg=200, haut=150):\n \"\"\"Constructeur du graphique : Axes et échelle horiz.\"\"\"\n <|body_0|>\n\n def traceCourbe(self, freq=1, phase=0, ampl=10, coul='red'):\n \"\"\"Tracé d'un graphique élongation/temps sur 1 seconde\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OscilloGraphe:\n \"\"\"Canevas spécialisé pour dessiner des courbes élongation/temps\"\"\"\n\n def __init__(self, boss=None, larg=200, haut=150):\n \"\"\"Constructeur du graphique : Axes et échelle horiz.\"\"\"\n Canvas.__init__(self)\n self.configure(width=larg, height=haut)\n self.larg, self.haut = (larg, haut)\n self.create_line(10, haut / 2, larg, haut / 2, arrow=LAST)\n self.create_line(10, haut - 5, 10, 5, arrow=LAST)\n pas = (larg - 25) / 8.0\n for t in range(1, 9):\n stx = 10 + t * pas\n self.create_line(stx, haut / 2 - 4, stx, haut / 2 + 4)\n\n def traceCourbe(self, freq=1, phase=0, ampl=10, coul='red'):\n \"\"\"Tracé d'un graphique élongation/temps sur 1 seconde\"\"\"\n curve = []\n pas = (self.larg - 25) / 1000.0\n for t in range(0, 1001, 5):\n e = ampl * sin(2 * pi * freq * t / 1000 - phase)\n x = 10 + t * pas\n y = self.haut / 2 - e * self.haut / 25\n curve.append((x, y))\n n = self.create_line(curve, fill=coul, smooth=1)\n", "source": "the_stack_v2_python_sparse", "source_path": "oscillo.py", "source_repo": "i-me/Python2014", "split": "test", "star_events_count": 0} {"blob_id": "3d16a7d289c48233c94bf7c63a8f2d8954461bbc", "bodies": ["if self._course_key is None:\n self._course_key = CourseKey.from_string(self._results_fields['course'])\nreturn self._course_key", "if self._usage_key is None:\n usage_key = UsageKey.from_string(self._results_fields['id'])\n self._usage_key = usage_key.map_into_course(self.get_course_key())\nreturn self._usage_key", "if self._module_store is None:\n self._module_store = modulestore()\nreturn self._module_store", "course_key = self.get_course_key()\nif course_key not in self._course_blocks:\n root_block_usage_key = self.get_module_store().make_course_usage_key(course_key)\n self._course_blocks[course_key] = get_course_blocks(user, root_block_usage_key)\nreturn self._course_blocks[course_key]", "if 'course' not in self._results_fields or 'id' not in self._results_fields:\n raise ValueError('Must have course and id in order to build url')\nreturn reverse('jump_to', kwargs={'course_id': self._results_fields['course'], 'location': self._results_fields['id']})", "if has_access(user, 'staff', self.get_course_key()):\n return False\nreturn self.get_usage_key() not in self.get_course_blocks(user).get_block_keys()"], "bodies_text": "<|body_start_0|>\n if self._course_key is None:\n self._course_key = CourseKey.from_string(self._results_fields['course'])\n return self._course_key\n<|end_body_0|>\n\n<|body_start_1|>\n if self._usage_key is None:\n usage_key = UsageKey.from_string(self._results_fields['id'])\n self._usage_key = usage_key.map_into_course(self.get_course_key())\n return self._usage_key\n<|end_body_1|>\n\n<|body_start_2|>\n if self._module_store is None:\n self._module_store = modulestore()\n return self._module_store\n<|end_body_2|>\n\n<|body_start_3|>\n course_key = self.get_course_key()\n if course_key not in self._course_blocks:\n root_block_usage_key = self.get_module_store().make_course_usage_key(course_key)\n self._course_blocks[course_key] = get_course_blocks(user, root_block_usage_key)\n return self._course_blocks[course_key]\n<|end_body_3|>\n\n<|body_start_4|>\n if 'course' not in self._results_fields or 'id' not in self._results_fields:\n raise ValueError('Must have course and id in order to build url')\n return reverse('jump_to', kwargs={'course_id': self._results_fields['course'], 'location': self._results_fields['id']})\n<|end_body_4|>\n\n<|body_start_5|>\n if has_access(user, 'staff', self.get_course_key()):\n return False\n return self.get_usage_key() not in self.get_course_blocks(user).get_block_keys()\n<|end_body_5|>\n", "class_docstring": "SearchResultProcessor for LMS Search", "class_name": "LmsSearchResultProcessor", "detected_licenses": ["MIT", "AGPL-3.0-only", "AGPL-3.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LmsSearchResultProcessor:\n \"\"\"SearchResultProcessor for LMS Search\"\"\"\n\n def get_course_key(self):\n \"\"\"fetch course key object from string representation - retain result for subsequent uses\"\"\"\n <|body_0|>\n\n def get_usage_key(self):\n \"\"\"fetch usage key for component from string representation - retain result for subsequent uses\"\"\"\n <|body_1|>\n\n def get_module_store(self):\n \"\"\"module store accessor - retain result for subsequent uses\"\"\"\n <|body_2|>\n\n def get_course_blocks(self, user):\n \"\"\"fetch cached blocks for course - retain for subsequent use\"\"\"\n <|body_3|>\n\n def url(self):\n \"\"\"Property to display the url for the given location, useful for allowing navigation\"\"\"\n <|body_4|>\n\n def should_remove(self, user):\n \"\"\"Test to see if this result should be removed due to access restriction\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self._course_key is None:\n self._course_key = CourseKey.from_string(self._results_fields['course'])\n return self._course_key\n<|end_body_0|>\n\n<|body_start_1|>\n if self._usage_key is None:\n usage_key = UsageKey.from_string(self._results_fields['id'])\n self._usage_key = usage_key.map_into_course(self.get_course_key())\n return self._usage_key\n<|end_body_1|>\n\n<|body_start_2|>\n if self._module_store is None:\n self._module_store = modulestore()\n return self._module_store\n<|end_body_2|>\n\n<|body_start_3|>\n course_key = self.get_course_key()\n if course_key not in self._course_blocks:\n root_block_usage_key = self.get_module_store().make_course_usage_key(course_key)\n self._course_blocks[course_key] = get_course_blocks(user, root_block_usage_key)\n return self._course_blocks[course_key]\n<|end_body_3|>\n\n<|body_start_4|>\n if 'course' not in self._results_fields or 'id' not in self._results_fields:\n raise ValueError('Must have course and id in order to build url')\n return reverse('jump_to', kwargs={'course_id': self._results_fields['course'], 'location': self._results_fields['id']})\n<|end_body_4|>\n\n<|body_start_5|>\n if has_access(user, 'staff', self.get_course_key()):\n return False\n return self.get_usage_key() not in self.get_course_blocks(user).get_block_keys()\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000174", "length_bytes": 2768, "license_type": "permissive", "methods": [{"docstring": "fetch course key object from string representation - retain result for subsequent uses", "name": "get_course_key", "signature": "def get_course_key(self)"}, {"docstring": "fetch usage key for component from string representation - retain result for subsequent uses", "name": "get_usage_key", "signature": "def get_usage_key(self)"}, {"docstring": "module store accessor - retain result for subsequent uses", "name": "get_module_store", "signature": "def get_module_store(self)"}, {"docstring": "fetch cached blocks for course - retain for subsequent use", "name": "get_course_blocks", "signature": "def get_course_blocks(self, user)"}, {"docstring": "Property to display the url for the given location, useful for allowing navigation", "name": "url", "signature": "def url(self)"}, {"docstring": "Test to see if this result should be removed due to access restriction", "name": "should_remove", "signature": "def should_remove(self, user)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_014029", "prompt": "Implement the Python class `LmsSearchResultProcessor` described below.\n\nClass description:\nSearchResultProcessor for LMS Search\n\nMethod signatures and docstrings:\n- def get_course_key(self): fetch course key object from string representation - retain result for subsequent uses\n- def get_usage_key(self): fetch usage key for component from string representation - retain result for subsequent uses\n- def get_module_store(self): module store accessor - retain result for subsequent uses\n- def get_course_blocks(self, user): fetch cached blocks for course - retain for subsequent use\n- def url(self): Property to display the url for the given location, useful for allowing navigation\n- def should_remove(self, user): Test to see if this result should be removed due to access restriction", "prompted_full_text": "Implement the Python class `LmsSearchResultProcessor` described below.\n\nClass description:\nSearchResultProcessor for LMS Search\n\nMethod signatures and docstrings:\n- def get_course_key(self): fetch course key object from string representation - retain result for subsequent uses\n- def get_usage_key(self): fetch usage key for component from string representation - retain result for subsequent uses\n- def get_module_store(self): module store accessor - retain result for subsequent uses\n- def get_course_blocks(self, user): fetch cached blocks for course - retain for subsequent use\n- def url(self): Property to display the url for the given location, useful for allowing navigation\n- def should_remove(self, user): Test to see if this result should be removed due to access restriction\n\n<|skeleton|>\nclass LmsSearchResultProcessor:\n \"\"\"SearchResultProcessor for LMS Search\"\"\"\n\n def get_course_key(self):\n \"\"\"fetch course key object from string representation - retain result for subsequent uses\"\"\"\n <|body_0|>\n\n def get_usage_key(self):\n \"\"\"fetch usage key for component from string representation - retain result for subsequent uses\"\"\"\n <|body_1|>\n\n def get_module_store(self):\n \"\"\"module store accessor - retain result for subsequent uses\"\"\"\n <|body_2|>\n\n def get_course_blocks(self, user):\n \"\"\"fetch cached blocks for course - retain for subsequent use\"\"\"\n <|body_3|>\n\n def url(self):\n \"\"\"Property to display the url for the given location, useful for allowing navigation\"\"\"\n <|body_4|>\n\n def should_remove(self, user):\n \"\"\"Test to see if this result should be removed due to access restriction\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self._course_key is None:\n self._course_key = CourseKey.from_string(self._results_fields['course'])\n return self._course_key\n<|end_body_0|>\n\n<|body_start_1|>\n if self._usage_key is None:\n usage_key = UsageKey.from_string(self._results_fields['id'])\n self._usage_key = usage_key.map_into_course(self.get_course_key())\n return self._usage_key\n<|end_body_1|>\n\n<|body_start_2|>\n if self._module_store is None:\n self._module_store = modulestore()\n return self._module_store\n<|end_body_2|>\n\n<|body_start_3|>\n course_key = self.get_course_key()\n if course_key not in self._course_blocks:\n root_block_usage_key = self.get_module_store().make_course_usage_key(course_key)\n self._course_blocks[course_key] = get_course_blocks(user, root_block_usage_key)\n return self._course_blocks[course_key]\n<|end_body_3|>\n\n<|body_start_4|>\n if 'course' not in self._results_fields or 'id' not in self._results_fields:\n raise ValueError('Must have course and id in order to build url')\n return reverse('jump_to', kwargs={'course_id': self._results_fields['course'], 'location': self._results_fields['id']})\n<|end_body_4|>\n\n<|body_start_5|>\n if has_access(user, 'staff', self.get_course_key()):\n return False\n return self.get_usage_key() not in self.get_course_blocks(user).get_block_keys()\n<|end_body_5|>\n", "revision_id": "5809eaca7079a15ee56b0b7fcfea425337046c97", "skeleton": "<|skeleton|>\nclass LmsSearchResultProcessor:\n \"\"\"SearchResultProcessor for LMS Search\"\"\"\n\n def get_course_key(self):\n \"\"\"fetch course key object from string representation - retain result for subsequent uses\"\"\"\n <|body_0|>\n\n def get_usage_key(self):\n \"\"\"fetch usage key for component from string representation - retain result for subsequent uses\"\"\"\n <|body_1|>\n\n def get_module_store(self):\n \"\"\"module store accessor - retain result for subsequent uses\"\"\"\n <|body_2|>\n\n def get_course_blocks(self, user):\n \"\"\"fetch cached blocks for course - retain for subsequent use\"\"\"\n <|body_3|>\n\n def url(self):\n \"\"\"Property to display the url for the given location, useful for allowing navigation\"\"\"\n <|body_4|>\n\n def should_remove(self, user):\n \"\"\"Test to see if this result should be removed due to access restriction\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LmsSearchResultProcessor:\n \"\"\"SearchResultProcessor for LMS Search\"\"\"\n\n def get_course_key(self):\n \"\"\"fetch course key object from string representation - retain result for subsequent uses\"\"\"\n if self._course_key is None:\n self._course_key = CourseKey.from_string(self._results_fields['course'])\n return self._course_key\n\n def get_usage_key(self):\n \"\"\"fetch usage key for component from string representation - retain result for subsequent uses\"\"\"\n if self._usage_key is None:\n usage_key = UsageKey.from_string(self._results_fields['id'])\n self._usage_key = usage_key.map_into_course(self.get_course_key())\n return self._usage_key\n\n def get_module_store(self):\n \"\"\"module store accessor - retain result for subsequent uses\"\"\"\n if self._module_store is None:\n self._module_store = modulestore()\n return self._module_store\n\n def get_course_blocks(self, user):\n \"\"\"fetch cached blocks for course - retain for subsequent use\"\"\"\n course_key = self.get_course_key()\n if course_key not in self._course_blocks:\n root_block_usage_key = self.get_module_store().make_course_usage_key(course_key)\n self._course_blocks[course_key] = get_course_blocks(user, root_block_usage_key)\n return self._course_blocks[course_key]\n\n def url(self):\n \"\"\"Property to display the url for the given location, useful for allowing navigation\"\"\"\n if 'course' not in self._results_fields or 'id' not in self._results_fields:\n raise ValueError('Must have course and id in order to build url')\n return reverse('jump_to', kwargs={'course_id': self._results_fields['course'], 'location': self._results_fields['id']})\n\n def should_remove(self, user):\n \"\"\"Test to see if this result should be removed due to access restriction\"\"\"\n if has_access(user, 'staff', self.get_course_key()):\n return False\n return self.get_usage_key() not in self.get_course_blocks(user).get_block_keys()\n", "source": "the_stack_v2_python_sparse", "source_path": "Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/lib/courseware_search/lms_result_processor.py", "source_repo": "luque/better-ways-of-thinking-about-software", "split": "test", "star_events_count": 3} {"blob_id": "dfd988aacfdcbb96c7f437addd6440a31ee03db7", "bodies": ["super(ModuleUIFrame, self).__init__(parent)\nself.columnconfigure(0, weight=1)\nself.rowconfigure(1, weight=1)\nfrom ....datatools import get_data\ndata = get_data()\napi_frame = ttk.LabelFrame(self, padding=8, text='Google API')\napi_frame.grid(row=0, column=0, sticky='W E N S')\napi_frame.columnconfigure(0, weight=1)\nself.google_api_key = tk.StringVar()\nttk.Label(api_frame, text='Google API Key').grid(column=0, row=0, sticky='W E N S')\nttk.Entry(api_frame, textvariable=self.google_api_key).grid(column=0, row=1, padx=0, pady=4, sticky='W E N S')\nself.soundcloud_client_id = tk.StringVar()\nttk.Label(api_frame, text='SoundCloud Client ID').grid(column=0, row=2, sticky='W E N S')\nttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid(column=0, row=3, padx=0, pady=4, sticky='W E N S')\nttk.Button(api_frame, command=lambda: self.update_keys(), text='Update API Data').grid(column=0, row=4, padx=0, pady=4, sticky='W E N S')\nif 'google_api_key' in data['discord']['keys']:\n self.google_api_key.set(data['discord']['keys']['google_api_key'])\nif 'soundcloud_client_id' in data['discord']['keys']:\n self.soundcloud_client_id.set(data['discord']['keys']['soundcloud_client_id'])", "from ...main import add_api_key\nadd_api_key('google_api_key', self.google_api_key.get())\nadd_api_key('soundcloud_client_id', self.soundcloud_client_id.get())"], "bodies_text": "<|body_start_0|>\n super(ModuleUIFrame, self).__init__(parent)\n self.columnconfigure(0, weight=1)\n self.rowconfigure(1, weight=1)\n from ....datatools import get_data\n data = get_data()\n api_frame = ttk.LabelFrame(self, padding=8, text='Google API')\n api_frame.grid(row=0, column=0, sticky='W E N S')\n api_frame.columnconfigure(0, weight=1)\n self.google_api_key = tk.StringVar()\n ttk.Label(api_frame, text='Google API Key').grid(column=0, row=0, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.google_api_key).grid(column=0, row=1, padx=0, pady=4, sticky='W E N S')\n self.soundcloud_client_id = tk.StringVar()\n ttk.Label(api_frame, text='SoundCloud Client ID').grid(column=0, row=2, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid(column=0, row=3, padx=0, pady=4, sticky='W E N S')\n ttk.Button(api_frame, command=lambda: self.update_keys(), text='Update API Data').grid(column=0, row=4, padx=0, pady=4, sticky='W E N S')\n if 'google_api_key' in data['discord']['keys']:\n self.google_api_key.set(data['discord']['keys']['google_api_key'])\n if 'soundcloud_client_id' in data['discord']['keys']:\n self.soundcloud_client_id.set(data['discord']['keys']['soundcloud_client_id'])\n<|end_body_0|>\n\n<|body_start_1|>\n from ...main import add_api_key\n add_api_key('google_api_key', self.google_api_key.get())\n add_api_key('soundcloud_client_id', self.soundcloud_client_id.get())\n<|end_body_1|>\n", "class_docstring": "The UI for the music module", "class_name": "ModuleUIFrame", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModuleUIFrame:\n \"\"\"The UI for the music module\"\"\"\n\n def __init__(self, parent):\n \"\"\"Create a new UI for the module Args: parent: A tk or ttk object\"\"\"\n <|body_0|>\n\n def update_keys(self):\n \"\"\"Updates the Google API key with the text value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ModuleUIFrame, self).__init__(parent)\n self.columnconfigure(0, weight=1)\n self.rowconfigure(1, weight=1)\n from ....datatools import get_data\n data = get_data()\n api_frame = ttk.LabelFrame(self, padding=8, text='Google API')\n api_frame.grid(row=0, column=0, sticky='W E N S')\n api_frame.columnconfigure(0, weight=1)\n self.google_api_key = tk.StringVar()\n ttk.Label(api_frame, text='Google API Key').grid(column=0, row=0, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.google_api_key).grid(column=0, row=1, padx=0, pady=4, sticky='W E N S')\n self.soundcloud_client_id = tk.StringVar()\n ttk.Label(api_frame, text='SoundCloud Client ID').grid(column=0, row=2, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid(column=0, row=3, padx=0, pady=4, sticky='W E N S')\n ttk.Button(api_frame, command=lambda: self.update_keys(), text='Update API Data').grid(column=0, row=4, padx=0, pady=4, sticky='W E N S')\n if 'google_api_key' in data['discord']['keys']:\n self.google_api_key.set(data['discord']['keys']['google_api_key'])\n if 'soundcloud_client_id' in data['discord']['keys']:\n self.soundcloud_client_id.set(data['discord']['keys']['soundcloud_client_id'])\n<|end_body_0|>\n\n<|body_start_1|>\n from ...main import add_api_key\n add_api_key('google_api_key', self.google_api_key.get())\n add_api_key('soundcloud_client_id', self.soundcloud_client_id.get())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000175", "length_bytes": 2052, "license_type": "no_license", "methods": [{"docstring": "Create a new UI for the module Args: parent: A tk or ttk object", "name": "__init__", "signature": "def __init__(self, parent)"}, {"docstring": "Updates the Google API key with the text value", "name": "update_keys", "signature": "def update_keys(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_015129", "prompt": "Implement the Python class `ModuleUIFrame` described below.\n\nClass description:\nThe UI for the music module\n\nMethod signatures and docstrings:\n- def __init__(self, parent): Create a new UI for the module Args: parent: A tk or ttk object\n- def update_keys(self): Updates the Google API key with the text value", "prompted_full_text": "Implement the Python class `ModuleUIFrame` described below.\n\nClass description:\nThe UI for the music module\n\nMethod signatures and docstrings:\n- def __init__(self, parent): Create a new UI for the module Args: parent: A tk or ttk object\n- def update_keys(self): Updates the Google API key with the text value\n\n<|skeleton|>\nclass ModuleUIFrame:\n \"\"\"The UI for the music module\"\"\"\n\n def __init__(self, parent):\n \"\"\"Create a new UI for the module Args: parent: A tk or ttk object\"\"\"\n <|body_0|>\n\n def update_keys(self):\n \"\"\"Updates the Google API key with the text value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ModuleUIFrame, self).__init__(parent)\n self.columnconfigure(0, weight=1)\n self.rowconfigure(1, weight=1)\n from ....datatools import get_data\n data = get_data()\n api_frame = ttk.LabelFrame(self, padding=8, text='Google API')\n api_frame.grid(row=0, column=0, sticky='W E N S')\n api_frame.columnconfigure(0, weight=1)\n self.google_api_key = tk.StringVar()\n ttk.Label(api_frame, text='Google API Key').grid(column=0, row=0, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.google_api_key).grid(column=0, row=1, padx=0, pady=4, sticky='W E N S')\n self.soundcloud_client_id = tk.StringVar()\n ttk.Label(api_frame, text='SoundCloud Client ID').grid(column=0, row=2, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid(column=0, row=3, padx=0, pady=4, sticky='W E N S')\n ttk.Button(api_frame, command=lambda: self.update_keys(), text='Update API Data').grid(column=0, row=4, padx=0, pady=4, sticky='W E N S')\n if 'google_api_key' in data['discord']['keys']:\n self.google_api_key.set(data['discord']['keys']['google_api_key'])\n if 'soundcloud_client_id' in data['discord']['keys']:\n self.soundcloud_client_id.set(data['discord']['keys']['soundcloud_client_id'])\n<|end_body_0|>\n\n<|body_start_1|>\n from ...main import add_api_key\n add_api_key('google_api_key', self.google_api_key.get())\n add_api_key('soundcloud_client_id', self.soundcloud_client_id.get())\n<|end_body_1|>\n", "revision_id": "6baa421d74ccdaa46195523f5b8a72ab1a70b1ec", "skeleton": "<|skeleton|>\nclass ModuleUIFrame:\n \"\"\"The UI for the music module\"\"\"\n\n def __init__(self, parent):\n \"\"\"Create a new UI for the module Args: parent: A tk or ttk object\"\"\"\n <|body_0|>\n\n def update_keys(self):\n \"\"\"Updates the Google API key with the text value\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModuleUIFrame:\n \"\"\"The UI for the music module\"\"\"\n\n def __init__(self, parent):\n \"\"\"Create a new UI for the module Args: parent: A tk or ttk object\"\"\"\n super(ModuleUIFrame, self).__init__(parent)\n self.columnconfigure(0, weight=1)\n self.rowconfigure(1, weight=1)\n from ....datatools import get_data\n data = get_data()\n api_frame = ttk.LabelFrame(self, padding=8, text='Google API')\n api_frame.grid(row=0, column=0, sticky='W E N S')\n api_frame.columnconfigure(0, weight=1)\n self.google_api_key = tk.StringVar()\n ttk.Label(api_frame, text='Google API Key').grid(column=0, row=0, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.google_api_key).grid(column=0, row=1, padx=0, pady=4, sticky='W E N S')\n self.soundcloud_client_id = tk.StringVar()\n ttk.Label(api_frame, text='SoundCloud Client ID').grid(column=0, row=2, sticky='W E N S')\n ttk.Entry(api_frame, textvariable=self.soundcloud_client_id).grid(column=0, row=3, padx=0, pady=4, sticky='W E N S')\n ttk.Button(api_frame, command=lambda: self.update_keys(), text='Update API Data').grid(column=0, row=4, padx=0, pady=4, sticky='W E N S')\n if 'google_api_key' in data['discord']['keys']:\n self.google_api_key.set(data['discord']['keys']['google_api_key'])\n if 'soundcloud_client_id' in data['discord']['keys']:\n self.soundcloud_client_id.set(data['discord']['keys']['soundcloud_client_id'])\n\n def update_keys(self):\n \"\"\"Updates the Google API key with the text value\"\"\"\n from ...main import add_api_key\n add_api_key('google_api_key', self.google_api_key.get())\n add_api_key('soundcloud_client_id', self.soundcloud_client_id.get())\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/Lib/site-packages/modis/discord_modis/modules/music/_ui.py", "source_repo": "zjq02010/LearnPython", "split": "test", "star_events_count": 0} {"blob_id": "d5954522ae915b0620271f9f75260fc561af4864", "bodies": ["customer = Customer()\ncustomer.id = _id\ncustomer = CustomerHelper.get_detail(customer)\nreq.context['result'] = {'data': customer.to_dict(), 'status': {'code': 200, 'message': 'success'}}\nres.status = falcon.HTTP_200", "customer = Customer()\ncustomer.id = _id\nCustomerHelper.delete(customer)\nreq.context['result'] = {'status': {'code': 200, 'message': 'success'}}\nres.status = falcon.HTTP_200", "customer = CustomerHelper.parse_from_query_string_request(req)\ncustomer.id = _id\nCustomerHelper.update(customer)\nreq.context['result'] = {'status': {'code': 200, 'message': 'success'}}\nres.status = falcon.HTTP_200"], "bodies_text": "<|body_start_0|>\n customer = Customer()\n customer.id = _id\n customer = CustomerHelper.get_detail(customer)\n req.context['result'] = {'data': customer.to_dict(), 'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_0|>\n\n<|body_start_1|>\n customer = Customer()\n customer.id = _id\n CustomerHelper.delete(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_1|>\n\n<|body_start_2|>\n customer = CustomerHelper.parse_from_query_string_request(req)\n customer.id = _id\n CustomerHelper.update(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_2|>\n", "class_docstring": "CustomerListner", "class_name": "CustomerListener", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomerListener:\n \"\"\"CustomerListner\"\"\"\n\n def on_get(self, req, res, _id):\n \"\"\"handle GET requests\"\"\"\n <|body_0|>\n\n def on_delete(self, req, res, _id):\n \"\"\"handle DELETE requests\"\"\"\n <|body_1|>\n\n def on_put(self, req, res, _id):\n \"\"\"handle PUT requests\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n customer = Customer()\n customer.id = _id\n customer = CustomerHelper.get_detail(customer)\n req.context['result'] = {'data': customer.to_dict(), 'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_0|>\n\n<|body_start_1|>\n customer = Customer()\n customer.id = _id\n CustomerHelper.delete(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_1|>\n\n<|body_start_2|>\n customer = CustomerHelper.parse_from_query_string_request(req)\n customer.id = _id\n CustomerHelper.update(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000176", "length_bytes": 1846, "license_type": "no_license", "methods": [{"docstring": "handle GET requests", "name": "on_get", "signature": "def on_get(self, req, res, _id)"}, {"docstring": "handle DELETE requests", "name": "on_delete", "signature": "def on_delete(self, req, res, _id)"}, {"docstring": "handle PUT requests", "name": "on_put", "signature": "def on_put(self, req, res, _id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_034664", "prompt": "Implement the Python class `CustomerListener` described below.\n\nClass description:\nCustomerListner\n\nMethod signatures and docstrings:\n- def on_get(self, req, res, _id): handle GET requests\n- def on_delete(self, req, res, _id): handle DELETE requests\n- def on_put(self, req, res, _id): handle PUT requests", "prompted_full_text": "Implement the Python class `CustomerListener` described below.\n\nClass description:\nCustomerListner\n\nMethod signatures and docstrings:\n- def on_get(self, req, res, _id): handle GET requests\n- def on_delete(self, req, res, _id): handle DELETE requests\n- def on_put(self, req, res, _id): handle PUT requests\n\n<|skeleton|>\nclass CustomerListener:\n \"\"\"CustomerListner\"\"\"\n\n def on_get(self, req, res, _id):\n \"\"\"handle GET requests\"\"\"\n <|body_0|>\n\n def on_delete(self, req, res, _id):\n \"\"\"handle DELETE requests\"\"\"\n <|body_1|>\n\n def on_put(self, req, res, _id):\n \"\"\"handle PUT requests\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n customer = Customer()\n customer.id = _id\n customer = CustomerHelper.get_detail(customer)\n req.context['result'] = {'data': customer.to_dict(), 'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_0|>\n\n<|body_start_1|>\n customer = Customer()\n customer.id = _id\n CustomerHelper.delete(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_1|>\n\n<|body_start_2|>\n customer = CustomerHelper.parse_from_query_string_request(req)\n customer.id = _id\n CustomerHelper.update(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n<|end_body_2|>\n", "revision_id": "11b885c11fe3b506f092c9aa1c22e1062f5f1e70", "skeleton": "<|skeleton|>\nclass CustomerListener:\n \"\"\"CustomerListner\"\"\"\n\n def on_get(self, req, res, _id):\n \"\"\"handle GET requests\"\"\"\n <|body_0|>\n\n def on_delete(self, req, res, _id):\n \"\"\"handle DELETE requests\"\"\"\n <|body_1|>\n\n def on_put(self, req, res, _id):\n \"\"\"handle PUT requests\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CustomerListener:\n \"\"\"CustomerListner\"\"\"\n\n def on_get(self, req, res, _id):\n \"\"\"handle GET requests\"\"\"\n customer = Customer()\n customer.id = _id\n customer = CustomerHelper.get_detail(customer)\n req.context['result'] = {'data': customer.to_dict(), 'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n\n def on_delete(self, req, res, _id):\n \"\"\"handle DELETE requests\"\"\"\n customer = Customer()\n customer.id = _id\n CustomerHelper.delete(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n\n def on_put(self, req, res, _id):\n \"\"\"handle PUT requests\"\"\"\n customer = CustomerHelper.parse_from_query_string_request(req)\n customer.id = _id\n CustomerHelper.update(customer)\n req.context['result'] = {'status': {'code': 200, 'message': 'success'}}\n res.status = falcon.HTTP_200\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/listener/customer.py", "source_repo": "arsystem/warehouse.api", "split": "test", "star_events_count": 0} {"blob_id": "25fa50fb7404b71cb74cb8286b7222ed2f877697", "bodies": ["len1, len2 = (len(nums1), len(nums2))\nres = [0] * k\nfor i in xrange(max(0, k - len2), min(k, len1) + 1):\n subarray1 = self.get_max_subarray(nums1, i)\n subarray2 = self.get_max_subarray(nums2, k - i)\n res = max(res, [max(subarray1, subarray2).pop(0) for _ in xrange(k)])\nreturn res", "res = [0] * k\ncur = 0\nfor i in xrange(len(nums)):\n while cur > 0 and cur + len(nums) - i > k and (nums[i] > res[cur - 1]):\n cur -= 1\n if cur < k:\n res[cur] = nums[i]\n cur += 1\nreturn res"], "bodies_text": "<|body_start_0|>\n len1, len2 = (len(nums1), len(nums2))\n res = [0] * k\n for i in xrange(max(0, k - len2), min(k, len1) + 1):\n subarray1 = self.get_max_subarray(nums1, i)\n subarray2 = self.get_max_subarray(nums2, k - i)\n res = max(res, [max(subarray1, subarray2).pop(0) for _ in xrange(k)])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = [0] * k\n cur = 0\n for i in xrange(len(nums)):\n while cur > 0 and cur + len(nums) - i > k and (nums[i] > res[cur - 1]):\n cur -= 1\n if cur < k:\n res[cur] = nums[i]\n cur += 1\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxNumber(self, nums1, nums2, k):\n \"\"\":type nums1: List[int] :type nums2: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def get_max_subarray(self, nums, k):\n \"\"\"A method to get the max subarray while preserve the related position in nums\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n len1, len2 = (len(nums1), len(nums2))\n res = [0] * k\n for i in xrange(max(0, k - len2), min(k, len1) + 1):\n subarray1 = self.get_max_subarray(nums1, i)\n subarray2 = self.get_max_subarray(nums2, k - i)\n res = max(res, [max(subarray1, subarray2).pop(0) for _ in xrange(k)])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = [0] * k\n cur = 0\n for i in xrange(len(nums)):\n while cur > 0 and cur + len(nums) - i > k and (nums[i] > res[cur - 1]):\n cur -= 1\n if cur < k:\n res[cur] = nums[i]\n cur += 1\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000177", "length_bytes": 1116, "license_type": "no_license", "methods": [{"docstring": ":type nums1: List[int] :type nums2: List[int] :type k: int :rtype: List[int]", "name": "maxNumber", "signature": "def maxNumber(self, nums1, nums2, k)"}, {"docstring": "A method to get the max subarray while preserve the related position in nums", "name": "get_max_subarray", "signature": "def get_max_subarray(self, nums, k)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_017601", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxNumber(self, nums1, nums2, k): :type nums1: List[int] :type nums2: List[int] :type k: int :rtype: List[int]\n- def get_max_subarray(self, nums, k): A method to get the max subarray while preserve the related position in nums", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxNumber(self, nums1, nums2, k): :type nums1: List[int] :type nums2: List[int] :type k: int :rtype: List[int]\n- def get_max_subarray(self, nums, k): A method to get the max subarray while preserve the related position in nums\n\n<|skeleton|>\nclass Solution:\n\n def maxNumber(self, nums1, nums2, k):\n \"\"\":type nums1: List[int] :type nums2: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def get_max_subarray(self, nums, k):\n \"\"\"A method to get the max subarray while preserve the related position in nums\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n len1, len2 = (len(nums1), len(nums2))\n res = [0] * k\n for i in xrange(max(0, k - len2), min(k, len1) + 1):\n subarray1 = self.get_max_subarray(nums1, i)\n subarray2 = self.get_max_subarray(nums2, k - i)\n res = max(res, [max(subarray1, subarray2).pop(0) for _ in xrange(k)])\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n res = [0] * k\n cur = 0\n for i in xrange(len(nums)):\n while cur > 0 and cur + len(nums) - i > k and (nums[i] > res[cur - 1]):\n cur -= 1\n if cur < k:\n res[cur] = nums[i]\n cur += 1\n return res\n<|end_body_1|>\n", "revision_id": "580366c7de5f27a931930aeec5e08aa043aa1d54", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxNumber(self, nums1, nums2, k):\n \"\"\":type nums1: List[int] :type nums2: List[int] :type k: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def get_max_subarray(self, nums, k):\n \"\"\"A method to get the max subarray while preserve the related position in nums\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def maxNumber(self, nums1, nums2, k):\n \"\"\":type nums1: List[int] :type nums2: List[int] :type k: int :rtype: List[int]\"\"\"\n len1, len2 = (len(nums1), len(nums2))\n res = [0] * k\n for i in xrange(max(0, k - len2), min(k, len1) + 1):\n subarray1 = self.get_max_subarray(nums1, i)\n subarray2 = self.get_max_subarray(nums2, k - i)\n res = max(res, [max(subarray1, subarray2).pop(0) for _ in xrange(k)])\n return res\n\n def get_max_subarray(self, nums, k):\n \"\"\"A method to get the max subarray while preserve the related position in nums\"\"\"\n res = [0] * k\n cur = 0\n for i in xrange(len(nums)):\n while cur > 0 and cur + len(nums) - i > k and (nums[i] > res[cur - 1]):\n cur -= 1\n if cur < k:\n res[cur] = nums[i]\n cur += 1\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "321-Create-Maximum-Number/solution.py", "source_repo": "z502185331/leetcode-python", "split": "test", "star_events_count": 0} {"blob_id": "ae53ce51b67ccea4836c1ec7ad4421123c9336ed", "bodies": ["if not root:\n return '# '\nans = str(root.val) + ' '\nans += self.serialize(root.left)\nans += self.serialize(root.right)\nreturn ans", "def helper(data):\n if not data:\n return None\n cur = data.pop(0)\n if cur == '#':\n return None\n root = TreeNode(int(cur))\n root.left = helper(data)\n root.right = helper(data)\n return root\nreturn helper(data.split(' '))"], "bodies_text": "<|body_start_0|>\n if not root:\n return '# '\n ans = str(root.val) + ' '\n ans += self.serialize(root.left)\n ans += self.serialize(root.right)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def helper(data):\n if not data:\n return None\n cur = data.pop(0)\n if cur == '#':\n return None\n root = TreeNode(int(cur))\n root.left = helper(data)\n root.right = helper(data)\n return root\n return helper(data.split(' '))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return '# '\n ans = str(root.val) + ' '\n ans += self.serialize(root.left)\n ans += self.serialize(root.right)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def helper(data):\n if not data:\n return None\n cur = data.pop(0)\n if cur == '#':\n return None\n root = TreeNode(int(cur))\n root.left = helper(data)\n root.right = helper(data)\n return root\n return helper(data.split(' '))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000178", "length_bytes": 834, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string.", "name": "serialize", "signature": "def serialize(self, root: TreeNode) -> str"}, {"docstring": "Decodes your encoded data to tree.", "name": "deserialize", "signature": "def deserialize(self, data: str) -> TreeNode"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013577", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root: TreeNode) -> str: Encodes a tree to a single string.\n- def deserialize(self, data: str) -> TreeNode: Decodes your encoded data to tree.\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return '# '\n ans = str(root.val) + ' '\n ans += self.serialize(root.left)\n ans += self.serialize(root.right)\n return ans\n<|end_body_0|>\n\n<|body_start_1|>\n def helper(data):\n if not data:\n return None\n cur = data.pop(0)\n if cur == '#':\n return None\n root = TreeNode(int(cur))\n root.left = helper(data)\n root.right = helper(data)\n return root\n return helper(data.split(' '))\n<|end_body_1|>\n", "revision_id": "64018a9ead8731ef98d48ab3bbd9d1dd6410c6e7", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n <|body_0|>\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root: TreeNode) -> str:\n \"\"\"Encodes a tree to a single string.\"\"\"\n if not root:\n return '# '\n ans = str(root.val) + ' '\n ans += self.serialize(root.left)\n ans += self.serialize(root.right)\n return ans\n\n def deserialize(self, data: str) -> TreeNode:\n \"\"\"Decodes your encoded data to tree.\"\"\"\n def helper(data):\n if not data:\n return None\n cur = data.pop(0)\n if cur == '#':\n return None\n root = TreeNode(int(cur))\n root.left = helper(data)\n root.right = helper(data)\n return root\n return helper(data.split(' '))\n", "source": "the_stack_v2_python_sparse", "source_path": "449_SerializeandDeserializeBST/Codec.py", "source_repo": "excaliburnan/SolutionsOnLeetcodeForZZW", "split": "test", "star_events_count": 0} {"blob_id": "5f7ca75835359f7974610da4d96ce05eb1ca243d", "bodies": ["word = ''\ninverse = inverser(word)\nself.assertEqual(inverse, word)\nself.assertIsInstance(inverse, str)", "word = int(152)\ninverse = inverser(word)\nself.assertFalse(inverse)", "word = 'abcde'\ninverse = inverser(word)\nreverse = inverser(inverse)\nself.assertEqual(reverse, word)\nself.assertFalse(word == inverse)", "sentence = 'abcde jklm nopq'\ninverse_sentence = inverser(sentence)\ninverse_word = inverse_string(sentence)\nreverse_sentence = inverser(inverse_sentence)\nself.assertEqual(sentence, reverse_sentence)\nself.assertFalse(sentence == inverse_word)"], "bodies_text": "<|body_start_0|>\n word = ''\n inverse = inverser(word)\n self.assertEqual(inverse, word)\n self.assertIsInstance(inverse, str)\n<|end_body_0|>\n\n<|body_start_1|>\n word = int(152)\n inverse = inverser(word)\n self.assertFalse(inverse)\n<|end_body_1|>\n\n<|body_start_2|>\n word = 'abcde'\n inverse = inverser(word)\n reverse = inverser(inverse)\n self.assertEqual(reverse, word)\n self.assertFalse(word == inverse)\n<|end_body_2|>\n\n<|body_start_3|>\n sentence = 'abcde jklm nopq'\n inverse_sentence = inverser(sentence)\n inverse_word = inverse_string(sentence)\n reverse_sentence = inverser(inverse_sentence)\n self.assertEqual(sentence, reverse_sentence)\n self.assertFalse(sentence == inverse_word)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "TestReverse", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestReverse:\n\n def test_reverse_empty_word(self):\n \"\"\"An empty word must return an empty response\"\"\"\n <|body_0|>\n\n def test_reverse_invalid_word(self):\n \"\"\"An invalid word must return False\"\"\"\n <|body_1|>\n\n def test_reverse_valid_word(self):\n \"\"\"For a valid string, it must return the ivnersed string\"\"\"\n <|body_2|>\n\n def test_reverse_valid_sentence(self):\n \"\"\"It must reverse only the words, not the sentence\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n word = ''\n inverse = inverser(word)\n self.assertEqual(inverse, word)\n self.assertIsInstance(inverse, str)\n<|end_body_0|>\n\n<|body_start_1|>\n word = int(152)\n inverse = inverser(word)\n self.assertFalse(inverse)\n<|end_body_1|>\n\n<|body_start_2|>\n word = 'abcde'\n inverse = inverser(word)\n reverse = inverser(inverse)\n self.assertEqual(reverse, word)\n self.assertFalse(word == inverse)\n<|end_body_2|>\n\n<|body_start_3|>\n sentence = 'abcde jklm nopq'\n inverse_sentence = inverser(sentence)\n inverse_word = inverse_string(sentence)\n reverse_sentence = inverser(inverse_sentence)\n self.assertEqual(sentence, reverse_sentence)\n self.assertFalse(sentence == inverse_word)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000179", "length_bytes": 1793, "license_type": "no_license", "methods": [{"docstring": "An empty word must return an empty response", "name": "test_reverse_empty_word", "signature": "def test_reverse_empty_word(self)"}, {"docstring": "An invalid word must return False", "name": "test_reverse_invalid_word", "signature": "def test_reverse_invalid_word(self)"}, {"docstring": "For a valid string, it must return the ivnersed string", "name": "test_reverse_valid_word", "signature": "def test_reverse_valid_word(self)"}, {"docstring": "It must reverse only the words, not the sentence", "name": "test_reverse_valid_sentence", "signature": "def test_reverse_valid_sentence(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_039240", "prompt": "Implement the Python class `TestReverse` described below.\n\nClass description:\nImplement the TestReverse class.\n\nMethod signatures and docstrings:\n- def test_reverse_empty_word(self): An empty word must return an empty response\n- def test_reverse_invalid_word(self): An invalid word must return False\n- def test_reverse_valid_word(self): For a valid string, it must return the ivnersed string\n- def test_reverse_valid_sentence(self): It must reverse only the words, not the sentence", "prompted_full_text": "Implement the Python class `TestReverse` described below.\n\nClass description:\nImplement the TestReverse class.\n\nMethod signatures and docstrings:\n- def test_reverse_empty_word(self): An empty word must return an empty response\n- def test_reverse_invalid_word(self): An invalid word must return False\n- def test_reverse_valid_word(self): For a valid string, it must return the ivnersed string\n- def test_reverse_valid_sentence(self): It must reverse only the words, not the sentence\n\n<|skeleton|>\nclass TestReverse:\n\n def test_reverse_empty_word(self):\n \"\"\"An empty word must return an empty response\"\"\"\n <|body_0|>\n\n def test_reverse_invalid_word(self):\n \"\"\"An invalid word must return False\"\"\"\n <|body_1|>\n\n def test_reverse_valid_word(self):\n \"\"\"For a valid string, it must return the ivnersed string\"\"\"\n <|body_2|>\n\n def test_reverse_valid_sentence(self):\n \"\"\"It must reverse only the words, not the sentence\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n word = ''\n inverse = inverser(word)\n self.assertEqual(inverse, word)\n self.assertIsInstance(inverse, str)\n<|end_body_0|>\n\n<|body_start_1|>\n word = int(152)\n inverse = inverser(word)\n self.assertFalse(inverse)\n<|end_body_1|>\n\n<|body_start_2|>\n word = 'abcde'\n inverse = inverser(word)\n reverse = inverser(inverse)\n self.assertEqual(reverse, word)\n self.assertFalse(word == inverse)\n<|end_body_2|>\n\n<|body_start_3|>\n sentence = 'abcde jklm nopq'\n inverse_sentence = inverser(sentence)\n inverse_word = inverse_string(sentence)\n reverse_sentence = inverser(inverse_sentence)\n self.assertEqual(sentence, reverse_sentence)\n self.assertFalse(sentence == inverse_word)\n<|end_body_3|>\n", "revision_id": "5aeea5a300292f68a598c3eb469a56dd9fa986c8", "skeleton": "<|skeleton|>\nclass TestReverse:\n\n def test_reverse_empty_word(self):\n \"\"\"An empty word must return an empty response\"\"\"\n <|body_0|>\n\n def test_reverse_invalid_word(self):\n \"\"\"An invalid word must return False\"\"\"\n <|body_1|>\n\n def test_reverse_valid_word(self):\n \"\"\"For a valid string, it must return the ivnersed string\"\"\"\n <|body_2|>\n\n def test_reverse_valid_sentence(self):\n \"\"\"It must reverse only the words, not the sentence\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestReverse:\n def test_reverse_empty_word(self):\n \"\"\"An empty word must return an empty response\"\"\"\n word = ''\n inverse = inverser(word)\n self.assertEqual(inverse, word)\n self.assertIsInstance(inverse, str)\n\n def test_reverse_invalid_word(self):\n \"\"\"An invalid word must return False\"\"\"\n word = int(152)\n inverse = inverser(word)\n self.assertFalse(inverse)\n\n def test_reverse_valid_word(self):\n \"\"\"For a valid string, it must return the ivnersed string\"\"\"\n word = 'abcde'\n inverse = inverser(word)\n reverse = inverser(inverse)\n self.assertEqual(reverse, word)\n self.assertFalse(word == inverse)\n\n def test_reverse_valid_sentence(self):\n \"\"\"It must reverse only the words, not the sentence\"\"\"\n sentence = 'abcde jklm nopq'\n inverse_sentence = inverser(sentence)\n inverse_word = inverse_string(sentence)\n reverse_sentence = inverser(inverse_sentence)\n self.assertEqual(sentence, reverse_sentence)\n self.assertFalse(sentence == inverse_word)\n", "source": "the_stack_v2_python_sparse", "source_path": "exercicios_iniciante2.py", "source_repo": "dabraaoarch/python-exercises", "split": "test", "star_events_count": 0} {"blob_id": "feddfd45deb809dff5a7c78d496c58b8da97d392", "bodies": ["if data is None:\n if lambtha > 0:\n self.lambtha = float(lambtha)\n else:\n raise ValueError('lambtha must be a positive value')\nelse:\n if type(data) != list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = sum(data) / len(data)", "e = 2.7182818285\nif type(k) != int:\n k = int(k)\nif k < 0:\n return 0\nfactorial = 1\nfor i in range(1, k + 1):\n factorial = factorial * i\npmf = pow(e, -self.lambtha) * pow(self.lambtha, k) / factorial\nreturn pmf", "if type(k) != int:\n k = int(k)\nif k < 0:\n return 0\nfactorial = 1\nprob = []\nfor i in range(0, k + 1):\n prob.append(self.pmf(i))\nreturn sum(prob)"], "bodies_text": "<|body_start_0|>\n if data is None:\n if lambtha > 0:\n self.lambtha = float(lambtha)\n else:\n raise ValueError('lambtha must be a positive value')\n else:\n if type(data) != list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = sum(data) / len(data)\n<|end_body_0|>\n\n<|body_start_1|>\n e = 2.7182818285\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n for i in range(1, k + 1):\n factorial = factorial * i\n pmf = pow(e, -self.lambtha) * pow(self.lambtha, k) / factorial\n return pmf\n<|end_body_1|>\n\n<|body_start_2|>\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n prob = []\n for i in range(0, k + 1):\n prob.append(self.pmf(i))\n return sum(prob)\n<|end_body_2|>\n", "class_docstring": "The class to call method of poisson distribution", "class_name": "Poisson", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Poisson:\n \"\"\"The class to call method of poisson distribution\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"Initialize method\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of successes\"\"\"\n <|body_1|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is None:\n if lambtha > 0:\n self.lambtha = float(lambtha)\n else:\n raise ValueError('lambtha must be a positive value')\n else:\n if type(data) != list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = sum(data) / len(data)\n<|end_body_0|>\n\n<|body_start_1|>\n e = 2.7182818285\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n for i in range(1, k + 1):\n factorial = factorial * i\n pmf = pow(e, -self.lambtha) * pow(self.lambtha, k) / factorial\n return pmf\n<|end_body_1|>\n\n<|body_start_2|>\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n prob = []\n for i in range(0, k + 1):\n prob.append(self.pmf(i))\n return sum(prob)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000180", "length_bytes": 1380, "license_type": "no_license", "methods": [{"docstring": "Initialize method", "name": "__init__", "signature": "def __init__(self, data=None, lambtha=1.0)"}, {"docstring": "Calculates the value of the PMF for a given number of successes", "name": "pmf", "signature": "def pmf(self, k)"}, {"docstring": "Calculates the value of the CDF", "name": "cdf", "signature": "def cdf(self, k)"}], "n_methods": 3, "prompt": "Implement the Python class `Poisson` described below.\n\nClass description:\nThe class to call method of poisson distribution\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, lambtha=1.0): Initialize method\n- def pmf(self, k): Calculates the value of the PMF for a given number of successes\n- def cdf(self, k): Calculates the value of the CDF", "prompted_full_text": "Implement the Python class `Poisson` described below.\n\nClass description:\nThe class to call method of poisson distribution\n\nMethod signatures and docstrings:\n- def __init__(self, data=None, lambtha=1.0): Initialize method\n- def pmf(self, k): Calculates the value of the PMF for a given number of successes\n- def cdf(self, k): Calculates the value of the CDF\n\n<|skeleton|>\nclass Poisson:\n \"\"\"The class to call method of poisson distribution\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"Initialize method\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of successes\"\"\"\n <|body_1|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if data is None:\n if lambtha > 0:\n self.lambtha = float(lambtha)\n else:\n raise ValueError('lambtha must be a positive value')\n else:\n if type(data) != list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = sum(data) / len(data)\n<|end_body_0|>\n\n<|body_start_1|>\n e = 2.7182818285\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n for i in range(1, k + 1):\n factorial = factorial * i\n pmf = pow(e, -self.lambtha) * pow(self.lambtha, k) / factorial\n return pmf\n<|end_body_1|>\n\n<|body_start_2|>\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n prob = []\n for i in range(0, k + 1):\n prob.append(self.pmf(i))\n return sum(prob)\n<|end_body_2|>\n", "revision_id": "c277c8bfcc2e65c8d0a483c08dd72cd093274c02", "skeleton": "<|skeleton|>\nclass Poisson:\n \"\"\"The class to call method of poisson distribution\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"Initialize method\"\"\"\n <|body_0|>\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of successes\"\"\"\n <|body_1|>\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Poisson:\n \"\"\"The class to call method of poisson distribution\"\"\"\n\n def __init__(self, data=None, lambtha=1.0):\n \"\"\"Initialize method\"\"\"\n if data is None:\n if lambtha > 0:\n self.lambtha = float(lambtha)\n else:\n raise ValueError('lambtha must be a positive value')\n else:\n if type(data) != list:\n raise TypeError('data must be a list')\n if len(data) < 2:\n raise ValueError('data must contain multiple values')\n self.lambtha = sum(data) / len(data)\n\n def pmf(self, k):\n \"\"\"Calculates the value of the PMF for a given number of successes\"\"\"\n e = 2.7182818285\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n for i in range(1, k + 1):\n factorial = factorial * i\n pmf = pow(e, -self.lambtha) * pow(self.lambtha, k) / factorial\n return pmf\n\n def cdf(self, k):\n \"\"\"Calculates the value of the CDF\"\"\"\n if type(k) != int:\n k = int(k)\n if k < 0:\n return 0\n factorial = 1\n prob = []\n for i in range(0, k + 1):\n prob.append(self.pmf(i))\n return sum(prob)\n", "source": "the_stack_v2_python_sparse", "source_path": "math/0x03-probability/poisson.py", "source_repo": "JDorangetree/holbertonschool-machine_learning", "split": "test", "star_events_count": 0} {"blob_id": "538dc3e0ba8c7f1489a099b687a1a0da134340c3", "bodies": ["rr = raw_response()\ntry:\n response = urllib.urlopen(self.settings.URL, xml)\nexcept IOError:\n raise connection_error_exception\nrr.text = response.read()\nrr.enter()\nreturn self._parse_response(rr.text)", "result_code = int(result_code)\nif result_code == 0:\n pass\nelif result_code == 1:\n raise transaction_failure_exception('authentication failed to the merchant services gateway')\nelif result_code == 4:\n raise transaction_failure_exception('invalid currency amount format. Please indicate currency in cents')\nelif result_code == 12:\n raise transaction_denied_exception\nelse:\n raise transaction_failure_exception('transaction failed')"], "bodies_text": "<|body_start_0|>\n rr = raw_response()\n try:\n response = urllib.urlopen(self.settings.URL, xml)\n except IOError:\n raise connection_error_exception\n rr.text = response.read()\n rr.enter()\n return self._parse_response(rr.text)\n<|end_body_0|>\n\n<|body_start_1|>\n result_code = int(result_code)\n if result_code == 0:\n pass\n elif result_code == 1:\n raise transaction_failure_exception('authentication failed to the merchant services gateway')\n elif result_code == 4:\n raise transaction_failure_exception('invalid currency amount format. Please indicate currency in cents')\n elif result_code == 12:\n raise transaction_denied_exception\n else:\n raise transaction_failure_exception('transaction failed')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "PayFlowProTransaction", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PayFlowProTransaction:\n\n def _issue_request(self, xml):\n \"\"\"This is a common method that can be used to submit a request in XML form to the payflowpro server @param xml XML data structure expected by paypal's server @type xml string\"\"\"\n <|body_0|>\n\n def _handle_result_code(self, result_code):\n \"\"\"Interpret a result code and act accordingly @param result_code result code received from payflowpro server @type result_code int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rr = raw_response()\n try:\n response = urllib.urlopen(self.settings.URL, xml)\n except IOError:\n raise connection_error_exception\n rr.text = response.read()\n rr.enter()\n return self._parse_response(rr.text)\n<|end_body_0|>\n\n<|body_start_1|>\n result_code = int(result_code)\n if result_code == 0:\n pass\n elif result_code == 1:\n raise transaction_failure_exception('authentication failed to the merchant services gateway')\n elif result_code == 4:\n raise transaction_failure_exception('invalid currency amount format. Please indicate currency in cents')\n elif result_code == 12:\n raise transaction_denied_exception\n else:\n raise transaction_failure_exception('transaction failed')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000181", "length_bytes": 4910, "license_type": "permissive", "methods": [{"docstring": "This is a common method that can be used to submit a request in XML form to the payflowpro server @param xml XML data structure expected by paypal's server @type xml string", "name": "_issue_request", "signature": "def _issue_request(self, xml)"}, {"docstring": "Interpret a result code and act accordingly @param result_code result code received from payflowpro server @type result_code int", "name": "_handle_result_code", "signature": "def _handle_result_code(self, result_code)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001025", "prompt": "Implement the Python class `PayFlowProTransaction` described below.\n\nClass description:\nImplement the PayFlowProTransaction class.\n\nMethod signatures and docstrings:\n- def _issue_request(self, xml): This is a common method that can be used to submit a request in XML form to the payflowpro server @param xml XML data structure expected by paypal's server @type xml string\n- def _handle_result_code(self, result_code): Interpret a result code and act accordingly @param result_code result code received from payflowpro server @type result_code int", "prompted_full_text": "Implement the Python class `PayFlowProTransaction` described below.\n\nClass description:\nImplement the PayFlowProTransaction class.\n\nMethod signatures and docstrings:\n- def _issue_request(self, xml): This is a common method that can be used to submit a request in XML form to the payflowpro server @param xml XML data structure expected by paypal's server @type xml string\n- def _handle_result_code(self, result_code): Interpret a result code and act accordingly @param result_code result code received from payflowpro server @type result_code int\n\n<|skeleton|>\nclass PayFlowProTransaction:\n\n def _issue_request(self, xml):\n \"\"\"This is a common method that can be used to submit a request in XML form to the payflowpro server @param xml XML data structure expected by paypal's server @type xml string\"\"\"\n <|body_0|>\n\n def _handle_result_code(self, result_code):\n \"\"\"Interpret a result code and act accordingly @param result_code result code received from payflowpro server @type result_code int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rr = raw_response()\n try:\n response = urllib.urlopen(self.settings.URL, xml)\n except IOError:\n raise connection_error_exception\n rr.text = response.read()\n rr.enter()\n return self._parse_response(rr.text)\n<|end_body_0|>\n\n<|body_start_1|>\n result_code = int(result_code)\n if result_code == 0:\n pass\n elif result_code == 1:\n raise transaction_failure_exception('authentication failed to the merchant services gateway')\n elif result_code == 4:\n raise transaction_failure_exception('invalid currency amount format. Please indicate currency in cents')\n elif result_code == 12:\n raise transaction_denied_exception\n else:\n raise transaction_failure_exception('transaction failed')\n<|end_body_1|>\n", "revision_id": "a59457bc37f0501aea1f54d006a6de94ff80511c", "skeleton": "<|skeleton|>\nclass PayFlowProTransaction:\n\n def _issue_request(self, xml):\n \"\"\"This is a common method that can be used to submit a request in XML form to the payflowpro server @param xml XML data structure expected by paypal's server @type xml string\"\"\"\n <|body_0|>\n\n def _handle_result_code(self, result_code):\n \"\"\"Interpret a result code and act accordingly @param result_code result code received from payflowpro server @type result_code int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PayFlowProTransaction:\n def _issue_request(self, xml):\n \"\"\"This is a common method that can be used to submit a request in XML form to the payflowpro server @param xml XML data structure expected by paypal's server @type xml string\"\"\"\n rr = raw_response()\n try:\n response = urllib.urlopen(self.settings.URL, xml)\n except IOError:\n raise connection_error_exception\n rr.text = response.read()\n rr.enter()\n return self._parse_response(rr.text)\n\n def _handle_result_code(self, result_code):\n \"\"\"Interpret a result code and act accordingly @param result_code result code received from payflowpro server @type result_code int\"\"\"\n result_code = int(result_code)\n if result_code == 0:\n pass\n elif result_code == 1:\n raise transaction_failure_exception('authentication failed to the merchant services gateway')\n elif result_code == 4:\n raise transaction_failure_exception('invalid currency amount format. Please indicate currency in cents')\n elif result_code == 12:\n raise transaction_denied_exception\n else:\n raise transaction_failure_exception('transaction failed')\n", "source": "the_stack_v2_python_sparse", "source_path": "ecommerce/payflowpro.py", "source_repo": "ninemoreminutes/openassign-server", "split": "test", "star_events_count": 0} {"blob_id": "1cd86e3d3fd9bd509dc228d0e42129223ed30c82", "bodies": ["super(AntibioticDetector, self).__init__()\nself.threshold = antibiotic_threshold\nself.key = antibiotic_key\nself.needed_state_keys.setdefault('internal', set()).add(antibiotic_key)", "concentration = states['internal'][self.key]\nif concentration > self.threshold:\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n super(AntibioticDetector, self).__init__()\n self.threshold = antibiotic_threshold\n self.key = antibiotic_key\n self.needed_state_keys.setdefault('internal', set()).add(antibiotic_key)\n<|end_body_0|>\n\n<|body_start_1|>\n concentration = states['internal'][self.key]\n if concentration > self.threshold:\n return False\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AntibioticDetector", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AntibioticDetector:\n\n def __init__(self, antibiotic_threshold=0.9, antibiotic_key='antibiotic'):\n \"\"\"Death detector for antibiotics Checks whether the cell can survive the current internal antibiotic concentrations. Arguments: antibiotic_threshold (float): The maximum internal antibiotic concentration the cell can survive. antibiotic_key (str): The name of the variable storing the cell's internal antibiotic concentration.\"\"\"\n <|body_0|>\n\n def check_can_survive(self, states):\n \"\"\"Checks if the current antibiotic concentration is survivable The internal antibiotic concentration MUST be stored in a variable of a port named ``internal``. Returns: bool: False if the antibiotic concentration is strictly greater than the the threshold. True otherwise.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(AntibioticDetector, self).__init__()\n self.threshold = antibiotic_threshold\n self.key = antibiotic_key\n self.needed_state_keys.setdefault('internal', set()).add(antibiotic_key)\n<|end_body_0|>\n\n<|body_start_1|>\n concentration = states['internal'][self.key]\n if concentration > self.threshold:\n return False\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000182", "length_bytes": 11313, "license_type": "permissive", "methods": [{"docstring": "Death detector for antibiotics Checks whether the cell can survive the current internal antibiotic concentrations. Arguments: antibiotic_threshold (float): The maximum internal antibiotic concentration the cell can survive. antibiotic_key (str): The name of the variable storing the cell's internal antibiotic concentration.", "name": "__init__", "signature": "def __init__(self, antibiotic_threshold=0.9, antibiotic_key='antibiotic')"}, {"docstring": "Checks if the current antibiotic concentration is survivable The internal antibiotic concentration MUST be stored in a variable of a port named ``internal``. Returns: bool: False if the antibiotic concentration is strictly greater than the the threshold. True otherwise.", "name": "check_can_survive", "signature": "def check_can_survive(self, states)"}], "n_methods": 2, "prompt": "Implement the Python class `AntibioticDetector` described below.\n\nClass description:\nImplement the AntibioticDetector class.\n\nMethod signatures and docstrings:\n- def __init__(self, antibiotic_threshold=0.9, antibiotic_key='antibiotic'): Death detector for antibiotics Checks whether the cell can survive the current internal antibiotic concentrations. Arguments: antibiotic_threshold (float): The maximum internal antibiotic concentration the cell can survive. antibiotic_key (str): The name of the variable storing the cell's internal antibiotic concentration.\n- def check_can_survive(self, states): Checks if the current antibiotic concentration is survivable The internal antibiotic concentration MUST be stored in a variable of a port named ``internal``. Returns: bool: False if the antibiotic concentration is strictly greater than the the threshold. True otherwise.", "prompted_full_text": "Implement the Python class `AntibioticDetector` described below.\n\nClass description:\nImplement the AntibioticDetector class.\n\nMethod signatures and docstrings:\n- def __init__(self, antibiotic_threshold=0.9, antibiotic_key='antibiotic'): Death detector for antibiotics Checks whether the cell can survive the current internal antibiotic concentrations. Arguments: antibiotic_threshold (float): The maximum internal antibiotic concentration the cell can survive. antibiotic_key (str): The name of the variable storing the cell's internal antibiotic concentration.\n- def check_can_survive(self, states): Checks if the current antibiotic concentration is survivable The internal antibiotic concentration MUST be stored in a variable of a port named ``internal``. Returns: bool: False if the antibiotic concentration is strictly greater than the the threshold. True otherwise.\n\n<|skeleton|>\nclass AntibioticDetector:\n\n def __init__(self, antibiotic_threshold=0.9, antibiotic_key='antibiotic'):\n \"\"\"Death detector for antibiotics Checks whether the cell can survive the current internal antibiotic concentrations. Arguments: antibiotic_threshold (float): The maximum internal antibiotic concentration the cell can survive. antibiotic_key (str): The name of the variable storing the cell's internal antibiotic concentration.\"\"\"\n <|body_0|>\n\n def check_can_survive(self, states):\n \"\"\"Checks if the current antibiotic concentration is survivable The internal antibiotic concentration MUST be stored in a variable of a port named ``internal``. Returns: bool: False if the antibiotic concentration is strictly greater than the the threshold. True otherwise.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(AntibioticDetector, self).__init__()\n self.threshold = antibiotic_threshold\n self.key = antibiotic_key\n self.needed_state_keys.setdefault('internal', set()).add(antibiotic_key)\n<|end_body_0|>\n\n<|body_start_1|>\n concentration = states['internal'][self.key]\n if concentration > self.threshold:\n return False\n return True\n<|end_body_1|>\n", "revision_id": "c504704a63ee8211f5f11e7fe486287dbc7553c3", "skeleton": "<|skeleton|>\nclass AntibioticDetector:\n\n def __init__(self, antibiotic_threshold=0.9, antibiotic_key='antibiotic'):\n \"\"\"Death detector for antibiotics Checks whether the cell can survive the current internal antibiotic concentrations. Arguments: antibiotic_threshold (float): The maximum internal antibiotic concentration the cell can survive. antibiotic_key (str): The name of the variable storing the cell's internal antibiotic concentration.\"\"\"\n <|body_0|>\n\n def check_can_survive(self, states):\n \"\"\"Checks if the current antibiotic concentration is survivable The internal antibiotic concentration MUST be stored in a variable of a port named ``internal``. Returns: bool: False if the antibiotic concentration is strictly greater than the the threshold. True otherwise.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AntibioticDetector:\n def __init__(self, antibiotic_threshold=0.9, antibiotic_key='antibiotic'):\n \"\"\"Death detector for antibiotics Checks whether the cell can survive the current internal antibiotic concentrations. Arguments: antibiotic_threshold (float): The maximum internal antibiotic concentration the cell can survive. antibiotic_key (str): The name of the variable storing the cell's internal antibiotic concentration.\"\"\"\n super(AntibioticDetector, self).__init__()\n self.threshold = antibiotic_threshold\n self.key = antibiotic_key\n self.needed_state_keys.setdefault('internal', set()).add(antibiotic_key)\n\n def check_can_survive(self, states):\n \"\"\"Checks if the current antibiotic concentration is survivable The internal antibiotic concentration MUST be stored in a variable of a port named ``internal``. Returns: bool: False if the antibiotic concentration is strictly greater than the the threshold. True otherwise.\"\"\"\n concentration = states['internal'][self.key]\n if concentration > self.threshold:\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "vivarium_cell/processes/death.py", "source_repo": "vivarium-collective/vivarium-cell", "split": "test", "star_events_count": 7} {"blob_id": "a4fcd80a2e14716cca4c94e238b803267b96db50", "bodies": ["widget_attrs = {'class': theme.form_element_html_class, 'placeholder': self.data.placeholder}\nfield_kwargs = {'label': self.data.label, 'help_text': self.data.help_text, 'initial': self.data.initial, 'required': self.data.required, 'widget': TextInput(attrs=widget_attrs)}\nreturn [(self.data.name, DurationField, field_kwargs)]", "value = cleaned_data.get(self.data.name, None)\nif isinstance(value, datetime.timedelta):\n value = duration_string(value)\n cleaned_data[self.data.name] = value\n return cleaned_data", "value = form.cleaned_data.get(self.data.name, None)\nif isinstance(value, datetime.timedelta):\n value = duration_string(value)\n form.cleaned_data[self.data.name] = value\nreturn form"], "bodies_text": "<|body_start_0|>\n widget_attrs = {'class': theme.form_element_html_class, 'placeholder': self.data.placeholder}\n field_kwargs = {'label': self.data.label, 'help_text': self.data.help_text, 'initial': self.data.initial, 'required': self.data.required, 'widget': TextInput(attrs=widget_attrs)}\n return [(self.data.name, DurationField, field_kwargs)]\n<|end_body_0|>\n\n<|body_start_1|>\n value = cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n cleaned_data[self.data.name] = value\n return cleaned_data\n<|end_body_1|>\n\n<|body_start_2|>\n value = form.cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n form.cleaned_data[self.data.name] = value\n return form\n<|end_body_2|>\n", "class_docstring": "Duration field plugin.", "class_name": "DurationInputPlugin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DurationInputPlugin:\n \"\"\"Duration field plugin.\"\"\"\n\n def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs):\n \"\"\"Get form field instances.\"\"\"\n <|body_0|>\n\n def prepare_plugin_form_data(self, cleaned_data):\n \"\"\"Prepare plugin form data. Might be used in integration plugins.\"\"\"\n <|body_1|>\n\n def submit_plugin_form_data(self, form_entry, request, form, form_element_entries=None, **kwargs):\n \"\"\"Submit plugin form data/process. :param fobi.models.FormEntry form_entry: Instance of ``fobi.models.FormEntry``. :param django.http.HttpRequest request: :param django.forms.Form form:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n widget_attrs = {'class': theme.form_element_html_class, 'placeholder': self.data.placeholder}\n field_kwargs = {'label': self.data.label, 'help_text': self.data.help_text, 'initial': self.data.initial, 'required': self.data.required, 'widget': TextInput(attrs=widget_attrs)}\n return [(self.data.name, DurationField, field_kwargs)]\n<|end_body_0|>\n\n<|body_start_1|>\n value = cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n cleaned_data[self.data.name] = value\n return cleaned_data\n<|end_body_1|>\n\n<|body_start_2|>\n value = form.cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n form.cleaned_data[self.data.name] = value\n return form\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000183", "length_bytes": 3796, "license_type": "permissive", "methods": [{"docstring": "Get form field instances.", "name": "get_form_field_instances", "signature": "def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs)"}, {"docstring": "Prepare plugin form data. Might be used in integration plugins.", "name": "prepare_plugin_form_data", "signature": "def prepare_plugin_form_data(self, cleaned_data)"}, {"docstring": "Submit plugin form data/process. :param fobi.models.FormEntry form_entry: Instance of ``fobi.models.FormEntry``. :param django.http.HttpRequest request: :param django.forms.Form form:", "name": "submit_plugin_form_data", "signature": "def submit_plugin_form_data(self, form_entry, request, form, form_element_entries=None, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_039651", "prompt": "Implement the Python class `DurationInputPlugin` described below.\n\nClass description:\nDuration field plugin.\n\nMethod signatures and docstrings:\n- def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): Get form field instances.\n- def prepare_plugin_form_data(self, cleaned_data): Prepare plugin form data. Might be used in integration plugins.\n- def submit_plugin_form_data(self, form_entry, request, form, form_element_entries=None, **kwargs): Submit plugin form data/process. :param fobi.models.FormEntry form_entry: Instance of ``fobi.models.FormEntry``. :param django.http.HttpRequest request: :param django.forms.Form form:", "prompted_full_text": "Implement the Python class `DurationInputPlugin` described below.\n\nClass description:\nDuration field plugin.\n\nMethod signatures and docstrings:\n- def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs): Get form field instances.\n- def prepare_plugin_form_data(self, cleaned_data): Prepare plugin form data. Might be used in integration plugins.\n- def submit_plugin_form_data(self, form_entry, request, form, form_element_entries=None, **kwargs): Submit plugin form data/process. :param fobi.models.FormEntry form_entry: Instance of ``fobi.models.FormEntry``. :param django.http.HttpRequest request: :param django.forms.Form form:\n\n<|skeleton|>\nclass DurationInputPlugin:\n \"\"\"Duration field plugin.\"\"\"\n\n def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs):\n \"\"\"Get form field instances.\"\"\"\n <|body_0|>\n\n def prepare_plugin_form_data(self, cleaned_data):\n \"\"\"Prepare plugin form data. Might be used in integration plugins.\"\"\"\n <|body_1|>\n\n def submit_plugin_form_data(self, form_entry, request, form, form_element_entries=None, **kwargs):\n \"\"\"Submit plugin form data/process. :param fobi.models.FormEntry form_entry: Instance of ``fobi.models.FormEntry``. :param django.http.HttpRequest request: :param django.forms.Form form:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n widget_attrs = {'class': theme.form_element_html_class, 'placeholder': self.data.placeholder}\n field_kwargs = {'label': self.data.label, 'help_text': self.data.help_text, 'initial': self.data.initial, 'required': self.data.required, 'widget': TextInput(attrs=widget_attrs)}\n return [(self.data.name, DurationField, field_kwargs)]\n<|end_body_0|>\n\n<|body_start_1|>\n value = cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n cleaned_data[self.data.name] = value\n return cleaned_data\n<|end_body_1|>\n\n<|body_start_2|>\n value = form.cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n form.cleaned_data[self.data.name] = value\n return form\n<|end_body_2|>\n", "revision_id": "4f6ca37bc600dcba3f74400d299826882d53b7d2", "skeleton": "<|skeleton|>\nclass DurationInputPlugin:\n \"\"\"Duration field plugin.\"\"\"\n\n def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs):\n \"\"\"Get form field instances.\"\"\"\n <|body_0|>\n\n def prepare_plugin_form_data(self, cleaned_data):\n \"\"\"Prepare plugin form data. Might be used in integration plugins.\"\"\"\n <|body_1|>\n\n def submit_plugin_form_data(self, form_entry, request, form, form_element_entries=None, **kwargs):\n \"\"\"Submit plugin form data/process. :param fobi.models.FormEntry form_entry: Instance of ``fobi.models.FormEntry``. :param django.http.HttpRequest request: :param django.forms.Form form:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DurationInputPlugin:\n \"\"\"Duration field plugin.\"\"\"\n\n def get_form_field_instances(self, request=None, form_entry=None, form_element_entries=None, **kwargs):\n \"\"\"Get form field instances.\"\"\"\n widget_attrs = {'class': theme.form_element_html_class, 'placeholder': self.data.placeholder}\n field_kwargs = {'label': self.data.label, 'help_text': self.data.help_text, 'initial': self.data.initial, 'required': self.data.required, 'widget': TextInput(attrs=widget_attrs)}\n return [(self.data.name, DurationField, field_kwargs)]\n\n def prepare_plugin_form_data(self, cleaned_data):\n \"\"\"Prepare plugin form data. Might be used in integration plugins.\"\"\"\n value = cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n cleaned_data[self.data.name] = value\n return cleaned_data\n\n def submit_plugin_form_data(self, form_entry, request, form, form_element_entries=None, **kwargs):\n \"\"\"Submit plugin form data/process. :param fobi.models.FormEntry form_entry: Instance of ``fobi.models.FormEntry``. :param django.http.HttpRequest request: :param django.forms.Form form:\"\"\"\n value = form.cleaned_data.get(self.data.name, None)\n if isinstance(value, datetime.timedelta):\n value = duration_string(value)\n form.cleaned_data[self.data.name] = value\n return form\n", "source": "the_stack_v2_python_sparse", "source_path": "events/contrib/plugins/form_elements/fields/duration/base.py", "source_repo": "mansonul/events", "split": "test", "star_events_count": 0} {"blob_id": "bf63ad77f15de81181dcdfd8cda8a766dafa0f3a", "bodies": ["data, errors = super().extractData(setErrors=setErrors)\nfor group in self.groups:\n groupData, groupErrors = group.extractData(setErrors=setErrors)\n data.update(groupData)\n if groupErrors:\n if errors:\n errors += groupErrors\n else:\n errors = groupErrors\nzope.event.notify(DataExtractedEvent(data, errors, self))\nreturn (data, errors)", "descriptions = []\ncontent = self.getContent()\nchanged = form.applyChanges(self, content, data)\nfor group in self.groups:\n groupChanged = group.applyChanges(data)\n for interface, names in groupChanged.items():\n changed[interface] = changed.get(interface, []) + names\nif changed:\n for interface, names in changed.items():\n descriptions.append(zope.lifecycleevent.Attributes(interface, *names))\n zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(content, *descriptions))\nreturn changed", "self.updateWidgets()\ngroups = []\nfor groupClass in self.groups:\n if interfaces.IGroup.providedBy(groupClass):\n group = groupClass\n else:\n group = groupClass(self.context, self.request, self)\n group.update()\n groups.append(group)\nself.groups = tuple(groups)\nself.updateActions()\nself.actions.execute()"], "bodies_text": "<|body_start_0|>\n data, errors = super().extractData(setErrors=setErrors)\n for group in self.groups:\n groupData, groupErrors = group.extractData(setErrors=setErrors)\n data.update(groupData)\n if groupErrors:\n if errors:\n errors += groupErrors\n else:\n errors = groupErrors\n zope.event.notify(DataExtractedEvent(data, errors, self))\n return (data, errors)\n<|end_body_0|>\n\n<|body_start_1|>\n descriptions = []\n content = self.getContent()\n changed = form.applyChanges(self, content, data)\n for group in self.groups:\n groupChanged = group.applyChanges(data)\n for interface, names in groupChanged.items():\n changed[interface] = changed.get(interface, []) + names\n if changed:\n for interface, names in changed.items():\n descriptions.append(zope.lifecycleevent.Attributes(interface, *names))\n zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(content, *descriptions))\n return changed\n<|end_body_1|>\n\n<|body_start_2|>\n self.updateWidgets()\n groups = []\n for groupClass in self.groups:\n if interfaces.IGroup.providedBy(groupClass):\n group = groupClass\n else:\n group = groupClass(self.context, self.request, self)\n group.update()\n groups.append(group)\n self.groups = tuple(groups)\n self.updateActions()\n self.actions.execute()\n<|end_body_2|>\n", "class_docstring": "A mix-in class for add and edit forms to support groups.", "class_name": "GroupForm", "detected_licenses": ["ZPL-2.1"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GroupForm:\n \"\"\"A mix-in class for add and edit forms to support groups.\"\"\"\n\n def extractData(self, setErrors=True):\n \"\"\"See interfaces.IForm\"\"\"\n <|body_0|>\n\n def applyChanges(self, data):\n \"\"\"See interfaces.IEditForm\"\"\"\n <|body_1|>\n\n def update(self):\n \"\"\"See interfaces.IForm\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data, errors = super().extractData(setErrors=setErrors)\n for group in self.groups:\n groupData, groupErrors = group.extractData(setErrors=setErrors)\n data.update(groupData)\n if groupErrors:\n if errors:\n errors += groupErrors\n else:\n errors = groupErrors\n zope.event.notify(DataExtractedEvent(data, errors, self))\n return (data, errors)\n<|end_body_0|>\n\n<|body_start_1|>\n descriptions = []\n content = self.getContent()\n changed = form.applyChanges(self, content, data)\n for group in self.groups:\n groupChanged = group.applyChanges(data)\n for interface, names in groupChanged.items():\n changed[interface] = changed.get(interface, []) + names\n if changed:\n for interface, names in changed.items():\n descriptions.append(zope.lifecycleevent.Attributes(interface, *names))\n zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(content, *descriptions))\n return changed\n<|end_body_1|>\n\n<|body_start_2|>\n self.updateWidgets()\n groups = []\n for groupClass in self.groups:\n if interfaces.IGroup.providedBy(groupClass):\n group = groupClass\n else:\n group = groupClass(self.context, self.request, self)\n group.update()\n groups.append(group)\n self.groups = tuple(groups)\n self.updateActions()\n self.actions.execute()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000184", "length_bytes": 5215, "license_type": "permissive", "methods": [{"docstring": "See interfaces.IForm", "name": "extractData", "signature": "def extractData(self, setErrors=True)"}, {"docstring": "See interfaces.IEditForm", "name": "applyChanges", "signature": "def applyChanges(self, data)"}, {"docstring": "See interfaces.IForm", "name": "update", "signature": "def update(self)"}], "n_methods": 3, "prompt": "Implement the Python class `GroupForm` described below.\n\nClass description:\nA mix-in class for add and edit forms to support groups.\n\nMethod signatures and docstrings:\n- def extractData(self, setErrors=True): See interfaces.IForm\n- def applyChanges(self, data): See interfaces.IEditForm\n- def update(self): See interfaces.IForm", "prompted_full_text": "Implement the Python class `GroupForm` described below.\n\nClass description:\nA mix-in class for add and edit forms to support groups.\n\nMethod signatures and docstrings:\n- def extractData(self, setErrors=True): See interfaces.IForm\n- def applyChanges(self, data): See interfaces.IEditForm\n- def update(self): See interfaces.IForm\n\n<|skeleton|>\nclass GroupForm:\n \"\"\"A mix-in class for add and edit forms to support groups.\"\"\"\n\n def extractData(self, setErrors=True):\n \"\"\"See interfaces.IForm\"\"\"\n <|body_0|>\n\n def applyChanges(self, data):\n \"\"\"See interfaces.IEditForm\"\"\"\n <|body_1|>\n\n def update(self):\n \"\"\"See interfaces.IForm\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data, errors = super().extractData(setErrors=setErrors)\n for group in self.groups:\n groupData, groupErrors = group.extractData(setErrors=setErrors)\n data.update(groupData)\n if groupErrors:\n if errors:\n errors += groupErrors\n else:\n errors = groupErrors\n zope.event.notify(DataExtractedEvent(data, errors, self))\n return (data, errors)\n<|end_body_0|>\n\n<|body_start_1|>\n descriptions = []\n content = self.getContent()\n changed = form.applyChanges(self, content, data)\n for group in self.groups:\n groupChanged = group.applyChanges(data)\n for interface, names in groupChanged.items():\n changed[interface] = changed.get(interface, []) + names\n if changed:\n for interface, names in changed.items():\n descriptions.append(zope.lifecycleevent.Attributes(interface, *names))\n zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(content, *descriptions))\n return changed\n<|end_body_1|>\n\n<|body_start_2|>\n self.updateWidgets()\n groups = []\n for groupClass in self.groups:\n if interfaces.IGroup.providedBy(groupClass):\n group = groupClass\n else:\n group = groupClass(self.context, self.request, self)\n group.update()\n groups.append(group)\n self.groups = tuple(groups)\n self.updateActions()\n self.actions.execute()\n<|end_body_2|>\n", "revision_id": "aa47e9b109ad2d7de600fc1d4ea7359d8144f356", "skeleton": "<|skeleton|>\nclass GroupForm:\n \"\"\"A mix-in class for add and edit forms to support groups.\"\"\"\n\n def extractData(self, setErrors=True):\n \"\"\"See interfaces.IForm\"\"\"\n <|body_0|>\n\n def applyChanges(self, data):\n \"\"\"See interfaces.IEditForm\"\"\"\n <|body_1|>\n\n def update(self):\n \"\"\"See interfaces.IForm\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GroupForm:\n \"\"\"A mix-in class for add and edit forms to support groups.\"\"\"\n\n def extractData(self, setErrors=True):\n \"\"\"See interfaces.IForm\"\"\"\n data, errors = super().extractData(setErrors=setErrors)\n for group in self.groups:\n groupData, groupErrors = group.extractData(setErrors=setErrors)\n data.update(groupData)\n if groupErrors:\n if errors:\n errors += groupErrors\n else:\n errors = groupErrors\n zope.event.notify(DataExtractedEvent(data, errors, self))\n return (data, errors)\n\n def applyChanges(self, data):\n \"\"\"See interfaces.IEditForm\"\"\"\n descriptions = []\n content = self.getContent()\n changed = form.applyChanges(self, content, data)\n for group in self.groups:\n groupChanged = group.applyChanges(data)\n for interface, names in groupChanged.items():\n changed[interface] = changed.get(interface, []) + names\n if changed:\n for interface, names in changed.items():\n descriptions.append(zope.lifecycleevent.Attributes(interface, *names))\n zope.event.notify(zope.lifecycleevent.ObjectModifiedEvent(content, *descriptions))\n return changed\n\n def update(self):\n \"\"\"See interfaces.IForm\"\"\"\n self.updateWidgets()\n groups = []\n for groupClass in self.groups:\n if interfaces.IGroup.providedBy(groupClass):\n group = groupClass\n else:\n group = groupClass(self.context, self.request, self)\n group.update()\n groups.append(group)\n self.groups = tuple(groups)\n self.updateActions()\n self.actions.execute()\n", "source": "the_stack_v2_python_sparse", "source_path": "src/z3c/form/group.py", "source_repo": "zopefoundation/z3c.form", "split": "test", "star_events_count": 6} {"blob_id": "c6a550a378f2cda7e85b6bd609568186edf62a06", "bodies": ["filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\nsession = keywords.get('session')\nif not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\nenv = api.muranoclient(request).environments.get_model(environment, '/', session)\nif env:\n return env['?'].get('metadata', {})\nreturn {}", "client = api.muranoclient(request)\nfilters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\nsession = keywords.get('session')\nif not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\nupdated = request.DATA.get('updated', {})\npatch = {'op': 'replace', 'path': '/?/metadata', 'value': updated}\nclient.environments.update_model(environment, [patch], session)"], "bodies_text": "<|body_start_0|>\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n env = api.muranoclient(request).environments.get_model(environment, '/', session)\n if env:\n return env['?'].get('metadata', {})\n return {}\n<|end_body_0|>\n\n<|body_start_1|>\n client = api.muranoclient(request)\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n updated = request.DATA.get('updated', {})\n patch = {'op': 'replace', 'path': '/?/metadata', 'value': updated}\n client.environments.update_model(environment, [patch], session)\n<|end_body_1|>\n", "class_docstring": "API for Murano components Metadata", "class_name": "EnvironmentsMetadata", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EnvironmentsMetadata:\n \"\"\"API for Murano components Metadata\"\"\"\n\n def get(self, request, environment):\n \"\"\"Get a metadata object for an environment Example GET: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword.\"\"\"\n <|body_0|>\n\n def post(self, request, environment):\n \"\"\"Set a metadata object for a given environment Example POST: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword. Request body should contain 'updated' keyword, contain all the updated metadata attributes. If it is empty, the metadata is considered to be deleted.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n env = api.muranoclient(request).environments.get_model(environment, '/', session)\n if env:\n return env['?'].get('metadata', {})\n return {}\n<|end_body_0|>\n\n<|body_start_1|>\n client = api.muranoclient(request)\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n updated = request.DATA.get('updated', {})\n patch = {'op': 'replace', 'path': '/?/metadata', 'value': updated}\n client.environments.update_model(environment, [patch], session)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000185", "length_bytes": 6096, "license_type": "permissive", "methods": [{"docstring": "Get a metadata object for an environment Example GET: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword.", "name": "get", "signature": "def get(self, request, environment)"}, {"docstring": "Set a metadata object for a given environment Example POST: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword. Request body should contain 'updated' keyword, contain all the updated metadata attributes. If it is empty, the metadata is considered to be deleted.", "name": "post", "signature": "def post(self, request, environment)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002669", "prompt": "Implement the Python class `EnvironmentsMetadata` described below.\n\nClass description:\nAPI for Murano components Metadata\n\nMethod signatures and docstrings:\n- def get(self, request, environment): Get a metadata object for an environment Example GET: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword.\n- def post(self, request, environment): Set a metadata object for a given environment Example POST: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword. Request body should contain 'updated' keyword, contain all the updated metadata attributes. If it is empty, the metadata is considered to be deleted.", "prompted_full_text": "Implement the Python class `EnvironmentsMetadata` described below.\n\nClass description:\nAPI for Murano components Metadata\n\nMethod signatures and docstrings:\n- def get(self, request, environment): Get a metadata object for an environment Example GET: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword.\n- def post(self, request, environment): Set a metadata object for a given environment Example POST: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword. Request body should contain 'updated' keyword, contain all the updated metadata attributes. If it is empty, the metadata is considered to be deleted.\n\n<|skeleton|>\nclass EnvironmentsMetadata:\n \"\"\"API for Murano components Metadata\"\"\"\n\n def get(self, request, environment):\n \"\"\"Get a metadata object for an environment Example GET: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword.\"\"\"\n <|body_0|>\n\n def post(self, request, environment):\n \"\"\"Set a metadata object for a given environment Example POST: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword. Request body should contain 'updated' keyword, contain all the updated metadata attributes. If it is empty, the metadata is considered to be deleted.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n env = api.muranoclient(request).environments.get_model(environment, '/', session)\n if env:\n return env['?'].get('metadata', {})\n return {}\n<|end_body_0|>\n\n<|body_start_1|>\n client = api.muranoclient(request)\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n updated = request.DATA.get('updated', {})\n patch = {'op': 'replace', 'path': '/?/metadata', 'value': updated}\n client.environments.update_model(environment, [patch], session)\n<|end_body_1|>\n", "revision_id": "54e2ea8a71385b1c7624b3d2c8056bd8a2c2e2f7", "skeleton": "<|skeleton|>\nclass EnvironmentsMetadata:\n \"\"\"API for Murano components Metadata\"\"\"\n\n def get(self, request, environment):\n \"\"\"Get a metadata object for an environment Example GET: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword.\"\"\"\n <|body_0|>\n\n def post(self, request, environment):\n \"\"\"Set a metadata object for a given environment Example POST: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword. Request body should contain 'updated' keyword, contain all the updated metadata attributes. If it is empty, the metadata is considered to be deleted.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EnvironmentsMetadata:\n \"\"\"API for Murano components Metadata\"\"\"\n\n def get(self, request, environment):\n \"\"\"Get a metadata object for an environment Example GET: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword.\"\"\"\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n env = api.muranoclient(request).environments.get_model(environment, '/', session)\n if env:\n return env['?'].get('metadata', {})\n return {}\n\n def post(self, request, environment):\n \"\"\"Set a metadata object for a given environment Example POST: http://localhost/api/app-catalog/environments/123/metadata The following get parameters may be passed in the GET request: :param environment: identifier of the environment Any additionally a \"session\" parameter should be passed through the API as a keyword. Request body should contain 'updated' keyword, contain all the updated metadata attributes. If it is empty, the metadata is considered to be deleted.\"\"\"\n client = api.muranoclient(request)\n filters, keywords = rest_utils.parse_filters_kwargs(request, ['session'])\n session = keywords.get('session')\n if not session:\n session = env_api.Session.get_or_create_or_delete(request, environment)\n updated = request.DATA.get('updated', {})\n patch = {'op': 'replace', 'path': '/?/metadata', 'value': updated}\n client.environments.update_model(environment, [patch], session)\n", "source": "the_stack_v2_python_sparse", "source_path": "muranodashboard/api/rest/environments.py", "source_repo": "openstack/murano-dashboard", "split": "test", "star_events_count": 38} {"blob_id": "fbd56d646930a400bad223587d419ae56059ad6a", "bodies": ["if graph.is_directed():\n raise ValueError('the graph is directed')\nself.graph = graph\nfor edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\nself.independent_set = set(self.graph.iternodes())\nself.cardinality = self.graph.v()\nself.source = None", "if source is not None:\n self.source = source\ndegree_dict = dict(((node, self.graph.degree(node)) for node in self.graph.iternodes()))\nwhile not self._is_independent():\n source = max((node for node in self.independent_set if node != self.source), key=degree_dict.__getitem__)\n self.independent_set.remove(source)\n for target in self.graph.iteradjacent(source):\n degree_dict[target] -= 1\nself.cardinality = len(self.independent_set)", "for edge in self.graph.iteredges():\n if edge.source in self.independent_set and edge.target in self.independent_set:\n return False\nreturn True"], "bodies_text": "<|body_start_0|>\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = set(self.graph.iternodes())\n self.cardinality = self.graph.v()\n self.source = None\n<|end_body_0|>\n\n<|body_start_1|>\n if source is not None:\n self.source = source\n degree_dict = dict(((node, self.graph.degree(node)) for node in self.graph.iternodes()))\n while not self._is_independent():\n source = max((node for node in self.independent_set if node != self.source), key=degree_dict.__getitem__)\n self.independent_set.remove(source)\n for target in self.graph.iteradjacent(source):\n degree_dict[target] -= 1\n self.cardinality = len(self.independent_set)\n<|end_body_1|>\n\n<|body_start_2|>\n for edge in self.graph.iteredges():\n if edge.source in self.independent_set and edge.target in self.independent_set:\n return False\n return True\n<|end_body_2|>\n", "class_docstring": "Find a maximal independent set.", "class_name": "LargestLastIndependentSet3", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LargestLastIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n <|body_0|>\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n <|body_1|>\n\n def _is_independent(self):\n \"\"\"Independence test.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = set(self.graph.iternodes())\n self.cardinality = self.graph.v()\n self.source = None\n<|end_body_0|>\n\n<|body_start_1|>\n if source is not None:\n self.source = source\n degree_dict = dict(((node, self.graph.degree(node)) for node in self.graph.iternodes()))\n while not self._is_independent():\n source = max((node for node in self.independent_set if node != self.source), key=degree_dict.__getitem__)\n self.independent_set.remove(source)\n for target in self.graph.iteradjacent(source):\n degree_dict[target] -= 1\n self.cardinality = len(self.independent_set)\n<|end_body_1|>\n\n<|body_start_2|>\n for edge in self.graph.iteredges():\n if edge.source in self.independent_set and edge.target in self.independent_set:\n return False\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000186", "length_bytes": 12259, "license_type": "permissive", "methods": [{"docstring": "The algorithm initialization.", "name": "__init__", "signature": "def __init__(self, graph)"}, {"docstring": "Executable pseudocode.", "name": "run", "signature": "def run(self, source=None)"}, {"docstring": "Independence test.", "name": "_is_independent", "signature": "def _is_independent(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_020676", "prompt": "Implement the Python class `LargestLastIndependentSet3` described below.\n\nClass description:\nFind a maximal independent set.\n\nMethod signatures and docstrings:\n- def __init__(self, graph): The algorithm initialization.\n- def run(self, source=None): Executable pseudocode.\n- def _is_independent(self): Independence test.", "prompted_full_text": "Implement the Python class `LargestLastIndependentSet3` described below.\n\nClass description:\nFind a maximal independent set.\n\nMethod signatures and docstrings:\n- def __init__(self, graph): The algorithm initialization.\n- def run(self, source=None): Executable pseudocode.\n- def _is_independent(self): Independence test.\n\n<|skeleton|>\nclass LargestLastIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n <|body_0|>\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n <|body_1|>\n\n def _is_independent(self):\n \"\"\"Independence test.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = set(self.graph.iternodes())\n self.cardinality = self.graph.v()\n self.source = None\n<|end_body_0|>\n\n<|body_start_1|>\n if source is not None:\n self.source = source\n degree_dict = dict(((node, self.graph.degree(node)) for node in self.graph.iternodes()))\n while not self._is_independent():\n source = max((node for node in self.independent_set if node != self.source), key=degree_dict.__getitem__)\n self.independent_set.remove(source)\n for target in self.graph.iteradjacent(source):\n degree_dict[target] -= 1\n self.cardinality = len(self.independent_set)\n<|end_body_1|>\n\n<|body_start_2|>\n for edge in self.graph.iteredges():\n if edge.source in self.independent_set and edge.target in self.independent_set:\n return False\n return True\n<|end_body_2|>\n", "revision_id": "0ff4ae303e8824e6bb8474d23b29a7b3e5ed8e60", "skeleton": "<|skeleton|>\nclass LargestLastIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n <|body_0|>\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n <|body_1|>\n\n def _is_independent(self):\n \"\"\"Independence test.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LargestLastIndependentSet3:\n \"\"\"Find a maximal independent set.\"\"\"\n\n def __init__(self, graph):\n \"\"\"The algorithm initialization.\"\"\"\n if graph.is_directed():\n raise ValueError('the graph is directed')\n self.graph = graph\n for edge in self.graph.iteredges():\n if edge.source == edge.target:\n raise ValueError('a loop detected')\n self.independent_set = set(self.graph.iternodes())\n self.cardinality = self.graph.v()\n self.source = None\n\n def run(self, source=None):\n \"\"\"Executable pseudocode.\"\"\"\n if source is not None:\n self.source = source\n degree_dict = dict(((node, self.graph.degree(node)) for node in self.graph.iternodes()))\n while not self._is_independent():\n source = max((node for node in self.independent_set if node != self.source), key=degree_dict.__getitem__)\n self.independent_set.remove(source)\n for target in self.graph.iteradjacent(source):\n degree_dict[target] -= 1\n self.cardinality = len(self.independent_set)\n\n def _is_independent(self):\n \"\"\"Independence test.\"\"\"\n for edge in self.graph.iteredges():\n if edge.source in self.independent_set and edge.target in self.independent_set:\n return False\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "graphtheory/independentsets/isetll.py", "source_repo": "kgashok/graphs-dict", "split": "test", "star_events_count": 0} {"blob_id": "10e9de4e5434284eebe66126f323c2810e9aa7af", "bodies": ["super(EfficientDet, self).__init__()\nself.image_size = config['image_size']\nfpn_channels = config['fpn_channels']\nanchor_scales = config['anchor_scales']\nanchor_aspect_ratios = config['anchor_aspect_ratios']\nanchor_base_scale = config['anchor_base_scale']\nregressor_classifier_num_repeat = config['regressor_classifier_num_repeat']\nother_norm_layer = config['other_norm_layer']\nalpha = config['alpha']\ngamma = config['gamma']\nnum_anchors = len(anchor_scales) * len(anchor_aspect_ratios)\nself.preprocess = PreProcess(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\nself.backbone = EfficientNetWithBiFPN(config)\nself.classifier = Classifier(fpn_channels, num_anchors, num_classes, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\nself.regressor = Regressor(fpn_channels, num_anchors, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\nself.anchor_gen = AnchorGenerator(anchor_base_scale, anchor_scales, anchor_aspect_ratios, (3, 4, 5, 6, 7))\nself.loss_fn = FocalLoss(alpha=alpha, gamma=gamma, beta=1 / 9)\nself.postprocess = PostProcess()", "assert isinstance(image_list, list) and isinstance(image_list[0], torch.Tensor)\nif image_size:\n image_size = max(image_size, self.image_size)\nelse:\n image_size = self.image_size\nimage_size = min(1920, image_size // 128 * 128)\nimage_list, targets = self.preprocess(image_list, targets, image_size)\nx = image_list.tensors\nfeatures = self.backbone(x)\nclassifications = self.classifier(features)\nregressions = self.regressor(features)\ndel features\nanchors = self.anchor_gen(x)\nregressions[..., 0::2], regressions[..., 1::2] = (regressions[..., 1::2], regressions[..., 0::2].clone())\nif targets is not None:\n if score_thresh is not None or nms_thresh is not None:\n print('Warning: no need to transfer score_thresh or nms_thresh')\n loss = self.loss_fn(classifications, regressions, anchors, targets)\n return loss\nelse:\n score_thresh = score_thresh or 0.2\n nms_thresh = nms_thresh or 0.5\n result = self.postprocess(image_list, classifications, regressions, anchors, score_thresh, nms_thresh)\n return result"], "bodies_text": "<|body_start_0|>\n super(EfficientDet, self).__init__()\n self.image_size = config['image_size']\n fpn_channels = config['fpn_channels']\n anchor_scales = config['anchor_scales']\n anchor_aspect_ratios = config['anchor_aspect_ratios']\n anchor_base_scale = config['anchor_base_scale']\n regressor_classifier_num_repeat = config['regressor_classifier_num_repeat']\n other_norm_layer = config['other_norm_layer']\n alpha = config['alpha']\n gamma = config['gamma']\n num_anchors = len(anchor_scales) * len(anchor_aspect_ratios)\n self.preprocess = PreProcess(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n self.backbone = EfficientNetWithBiFPN(config)\n self.classifier = Classifier(fpn_channels, num_anchors, num_classes, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.regressor = Regressor(fpn_channels, num_anchors, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.anchor_gen = AnchorGenerator(anchor_base_scale, anchor_scales, anchor_aspect_ratios, (3, 4, 5, 6, 7))\n self.loss_fn = FocalLoss(alpha=alpha, gamma=gamma, beta=1 / 9)\n self.postprocess = PostProcess()\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(image_list, list) and isinstance(image_list[0], torch.Tensor)\n if image_size:\n image_size = max(image_size, self.image_size)\n else:\n image_size = self.image_size\n image_size = min(1920, image_size // 128 * 128)\n image_list, targets = self.preprocess(image_list, targets, image_size)\n x = image_list.tensors\n features = self.backbone(x)\n classifications = self.classifier(features)\n regressions = self.regressor(features)\n del features\n anchors = self.anchor_gen(x)\n regressions[..., 0::2], regressions[..., 1::2] = (regressions[..., 1::2], regressions[..., 0::2].clone())\n if targets is not None:\n if score_thresh is not None or nms_thresh is not None:\n print('Warning: no need to transfer score_thresh or nms_thresh')\n loss = self.loss_fn(classifications, regressions, anchors, targets)\n return loss\n else:\n score_thresh = score_thresh or 0.2\n nms_thresh = nms_thresh or 0.5\n result = self.postprocess(image_list, classifications, regressions, anchors, score_thresh, nms_thresh)\n return result\n<|end_body_1|>\n", "class_docstring": "", "class_name": "EfficientDet", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EfficientDet:\n\n def __init__(self, num_classes, config):\n \"\"\"please use _efficientdet()\"\"\"\n <|body_0|>\n\n def forward(self, image_list, targets=None, image_size=None, score_thresh=None, nms_thresh=None):\n \"\"\":param image_list: List[Tensor[C, H, W]] [0., 1.] :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]] boxes: left, top, right, bottom :param image_size: int. 真实输入图片的大小 :return: train模式: loss: Dict eval模式: result: Dict\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EfficientDet, self).__init__()\n self.image_size = config['image_size']\n fpn_channels = config['fpn_channels']\n anchor_scales = config['anchor_scales']\n anchor_aspect_ratios = config['anchor_aspect_ratios']\n anchor_base_scale = config['anchor_base_scale']\n regressor_classifier_num_repeat = config['regressor_classifier_num_repeat']\n other_norm_layer = config['other_norm_layer']\n alpha = config['alpha']\n gamma = config['gamma']\n num_anchors = len(anchor_scales) * len(anchor_aspect_ratios)\n self.preprocess = PreProcess(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n self.backbone = EfficientNetWithBiFPN(config)\n self.classifier = Classifier(fpn_channels, num_anchors, num_classes, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.regressor = Regressor(fpn_channels, num_anchors, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.anchor_gen = AnchorGenerator(anchor_base_scale, anchor_scales, anchor_aspect_ratios, (3, 4, 5, 6, 7))\n self.loss_fn = FocalLoss(alpha=alpha, gamma=gamma, beta=1 / 9)\n self.postprocess = PostProcess()\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(image_list, list) and isinstance(image_list[0], torch.Tensor)\n if image_size:\n image_size = max(image_size, self.image_size)\n else:\n image_size = self.image_size\n image_size = min(1920, image_size // 128 * 128)\n image_list, targets = self.preprocess(image_list, targets, image_size)\n x = image_list.tensors\n features = self.backbone(x)\n classifications = self.classifier(features)\n regressions = self.regressor(features)\n del features\n anchors = self.anchor_gen(x)\n regressions[..., 0::2], regressions[..., 1::2] = (regressions[..., 1::2], regressions[..., 0::2].clone())\n if targets is not None:\n if score_thresh is not None or nms_thresh is not None:\n print('Warning: no need to transfer score_thresh or nms_thresh')\n loss = self.loss_fn(classifications, regressions, anchors, targets)\n return loss\n else:\n score_thresh = score_thresh or 0.2\n nms_thresh = nms_thresh or 0.5\n result = self.postprocess(image_list, classifications, regressions, anchors, score_thresh, nms_thresh)\n return result\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000187", "length_bytes": 6541, "license_type": "permissive", "methods": [{"docstring": "please use _efficientdet()", "name": "__init__", "signature": "def __init__(self, num_classes, config)"}, {"docstring": ":param image_list: List[Tensor[C, H, W]] [0., 1.] :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]] boxes: left, top, right, bottom :param image_size: int. 真实输入图片的大小 :return: train模式: loss: Dict eval模式: result: Dict", "name": "forward", "signature": "def forward(self, image_list, targets=None, image_size=None, score_thresh=None, nms_thresh=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037353", "prompt": "Implement the Python class `EfficientDet` described below.\n\nClass description:\nImplement the EfficientDet class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_classes, config): please use _efficientdet()\n- def forward(self, image_list, targets=None, image_size=None, score_thresh=None, nms_thresh=None): :param image_list: List[Tensor[C, H, W]] [0., 1.] :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]] boxes: left, top, right, bottom :param image_size: int. 真实输入图片的大小 :return: train模式: loss: Dict eval模式: result: Dict", "prompted_full_text": "Implement the Python class `EfficientDet` described below.\n\nClass description:\nImplement the EfficientDet class.\n\nMethod signatures and docstrings:\n- def __init__(self, num_classes, config): please use _efficientdet()\n- def forward(self, image_list, targets=None, image_size=None, score_thresh=None, nms_thresh=None): :param image_list: List[Tensor[C, H, W]] [0., 1.] :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]] boxes: left, top, right, bottom :param image_size: int. 真实输入图片的大小 :return: train模式: loss: Dict eval模式: result: Dict\n\n<|skeleton|>\nclass EfficientDet:\n\n def __init__(self, num_classes, config):\n \"\"\"please use _efficientdet()\"\"\"\n <|body_0|>\n\n def forward(self, image_list, targets=None, image_size=None, score_thresh=None, nms_thresh=None):\n \"\"\":param image_list: List[Tensor[C, H, W]] [0., 1.] :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]] boxes: left, top, right, bottom :param image_size: int. 真实输入图片的大小 :return: train模式: loss: Dict eval模式: result: Dict\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EfficientDet, self).__init__()\n self.image_size = config['image_size']\n fpn_channels = config['fpn_channels']\n anchor_scales = config['anchor_scales']\n anchor_aspect_ratios = config['anchor_aspect_ratios']\n anchor_base_scale = config['anchor_base_scale']\n regressor_classifier_num_repeat = config['regressor_classifier_num_repeat']\n other_norm_layer = config['other_norm_layer']\n alpha = config['alpha']\n gamma = config['gamma']\n num_anchors = len(anchor_scales) * len(anchor_aspect_ratios)\n self.preprocess = PreProcess(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n self.backbone = EfficientNetWithBiFPN(config)\n self.classifier = Classifier(fpn_channels, num_anchors, num_classes, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.regressor = Regressor(fpn_channels, num_anchors, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.anchor_gen = AnchorGenerator(anchor_base_scale, anchor_scales, anchor_aspect_ratios, (3, 4, 5, 6, 7))\n self.loss_fn = FocalLoss(alpha=alpha, gamma=gamma, beta=1 / 9)\n self.postprocess = PostProcess()\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(image_list, list) and isinstance(image_list[0], torch.Tensor)\n if image_size:\n image_size = max(image_size, self.image_size)\n else:\n image_size = self.image_size\n image_size = min(1920, image_size // 128 * 128)\n image_list, targets = self.preprocess(image_list, targets, image_size)\n x = image_list.tensors\n features = self.backbone(x)\n classifications = self.classifier(features)\n regressions = self.regressor(features)\n del features\n anchors = self.anchor_gen(x)\n regressions[..., 0::2], regressions[..., 1::2] = (regressions[..., 1::2], regressions[..., 0::2].clone())\n if targets is not None:\n if score_thresh is not None or nms_thresh is not None:\n print('Warning: no need to transfer score_thresh or nms_thresh')\n loss = self.loss_fn(classifications, regressions, anchors, targets)\n return loss\n else:\n score_thresh = score_thresh or 0.2\n nms_thresh = nms_thresh or 0.5\n result = self.postprocess(image_list, classifications, regressions, anchors, score_thresh, nms_thresh)\n return result\n<|end_body_1|>\n", "revision_id": "79616be397b7f57992cd43b772f65b58b5e25a8b", "skeleton": "<|skeleton|>\nclass EfficientDet:\n\n def __init__(self, num_classes, config):\n \"\"\"please use _efficientdet()\"\"\"\n <|body_0|>\n\n def forward(self, image_list, targets=None, image_size=None, score_thresh=None, nms_thresh=None):\n \"\"\":param image_list: List[Tensor[C, H, W]] [0., 1.] :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]] boxes: left, top, right, bottom :param image_size: int. 真实输入图片的大小 :return: train模式: loss: Dict eval模式: result: Dict\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EfficientDet:\n def __init__(self, num_classes, config):\n \"\"\"please use _efficientdet()\"\"\"\n super(EfficientDet, self).__init__()\n self.image_size = config['image_size']\n fpn_channels = config['fpn_channels']\n anchor_scales = config['anchor_scales']\n anchor_aspect_ratios = config['anchor_aspect_ratios']\n anchor_base_scale = config['anchor_base_scale']\n regressor_classifier_num_repeat = config['regressor_classifier_num_repeat']\n other_norm_layer = config['other_norm_layer']\n alpha = config['alpha']\n gamma = config['gamma']\n num_anchors = len(anchor_scales) * len(anchor_aspect_ratios)\n self.preprocess = PreProcess(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))\n self.backbone = EfficientNetWithBiFPN(config)\n self.classifier = Classifier(fpn_channels, num_anchors, num_classes, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.regressor = Regressor(fpn_channels, num_anchors, regressor_classifier_num_repeat, 0.01, 0.001, other_norm_layer)\n self.anchor_gen = AnchorGenerator(anchor_base_scale, anchor_scales, anchor_aspect_ratios, (3, 4, 5, 6, 7))\n self.loss_fn = FocalLoss(alpha=alpha, gamma=gamma, beta=1 / 9)\n self.postprocess = PostProcess()\n\n def forward(self, image_list, targets=None, image_size=None, score_thresh=None, nms_thresh=None):\n \"\"\":param image_list: List[Tensor[C, H, W]] [0., 1.] :param targets: Dict['labels': List[Tensor[NUMi]], 'boxes': List[Tensor[NUMi, 4]]] boxes: left, top, right, bottom :param image_size: int. 真实输入图片的大小 :return: train模式: loss: Dict eval模式: result: Dict\"\"\"\n assert isinstance(image_list, list) and isinstance(image_list[0], torch.Tensor)\n if image_size:\n image_size = max(image_size, self.image_size)\n else:\n image_size = self.image_size\n image_size = min(1920, image_size // 128 * 128)\n image_list, targets = self.preprocess(image_list, targets, image_size)\n x = image_list.tensors\n features = self.backbone(x)\n classifications = self.classifier(features)\n regressions = self.regressor(features)\n del features\n anchors = self.anchor_gen(x)\n regressions[..., 0::2], regressions[..., 1::2] = (regressions[..., 1::2], regressions[..., 0::2].clone())\n if targets is not None:\n if score_thresh is not None or nms_thresh is not None:\n print('Warning: no need to transfer score_thresh or nms_thresh')\n loss = self.loss_fn(classifications, regressions, anchors, targets)\n return loss\n else:\n score_thresh = score_thresh or 0.2\n nms_thresh = nms_thresh or 0.5\n result = self.postprocess(image_list, classifications, regressions, anchors, score_thresh, nms_thresh)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "models/efficientdet.py", "source_repo": "Jintao-Huang/EfficientDet_PyTorch", "split": "test", "star_events_count": 22} {"blob_id": "c5c47cab7df8d58eb8cd6c128e2fed1b6a804f68", "bodies": ["if self.state_model.op_state in [DevState.FAULT, DevState.UNKNOWN]:\n tango.Except.throw_exception(f'Command TelescopeOn is not allowed in current state {self.state_model.op_state}.', 'Failed to invoke On command on CspMasterLeafNode.', 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\nreturn True", "this_server = TangoServerHelper.get_instance()\nif event.err:\n log_msg = f'{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}'\n self.logger.error(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\nelse:\n log_msg = f'{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}'\n self.logger.info(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)", "this_server = TangoServerHelper.get_instance()\ntry:\n sdp_master_ln_fqdn = ''\n property_val = this_server.read_property('SdpMasterFQDN')[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(const.CMD_ON, None, self.telescopeon_cmd_ended_cb)\n log_msg = const.STR_ON_CMD_SUCCESS\n self.logger.debug(log_msg)\nexcept DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f'{const.ERR_ON_CMD_FAIL}{dev_failed}'\n tango.Except.re_throw_exception(dev_failed, const.ERR_INVOKING_CMD, log_msg, 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)"], "bodies_text": "<|body_start_0|>\n if self.state_model.op_state in [DevState.FAULT, DevState.UNKNOWN]:\n tango.Except.throw_exception(f'Command TelescopeOn is not allowed in current state {self.state_model.op_state}.', 'Failed to invoke On command on CspMasterLeafNode.', 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n this_server = TangoServerHelper.get_instance()\n if event.err:\n log_msg = f'{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}'\n self.logger.error(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n else:\n log_msg = f'{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}'\n self.logger.info(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n<|end_body_1|>\n\n<|body_start_2|>\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = ''\n property_val = this_server.read_property('SdpMasterFQDN')[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(const.CMD_ON, None, self.telescopeon_cmd_ended_cb)\n log_msg = const.STR_ON_CMD_SUCCESS\n self.logger.debug(log_msg)\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f'{const.ERR_ON_CMD_FAIL}{dev_failed}'\n tango.Except.re_throw_exception(dev_failed, const.ERR_INVOKING_CMD, log_msg, 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n<|end_body_2|>\n", "class_docstring": "A class for TelescopeOn() command of SDP Master Leaf Node. TelescopeOn command is inherited from BaseCommand. Informs the SDP that it can start executing Processing Blocks. Sets the State to ON.", "class_name": "TelescopeOn", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TelescopeOn:\n \"\"\"A class for TelescopeOn() command of SDP Master Leaf Node. TelescopeOn command is inherited from BaseCommand. Informs the SDP that it can start executing Processing Blocks. Sets the State to ON.\"\"\"\n\n def check_allowed(self):\n \"\"\"Checks whether this command is allowed to be run in current device state :return: True if this command is allowed to be run in current device state :rtype: boolean :raises: DevFailed if this command is not allowed to be run in current device state\"\"\"\n <|body_0|>\n\n def telescopeon_cmd_ended_cb(self, event):\n \"\"\"Callback function immediately executed when the asynchronous invoked command returns. Checks whether the On command has been successfully invoked on SDP Master. :param event: a CmdDoneEvent object. This class is used to pass data to the callback method in asynchronous callback model for command execution. :type: CmdDoneEvent object It has the following members: - device : (DeviceProxy) The DeviceProxy object on which the call was executed. - cmd_name : (str) The command name - argout_raw : (DeviceData) The command argout - argout : The command argout - err : (bool) A boolean flag set to true if the command failed. False otherwise - errors : (sequence) The error stack - ext :return:\"\"\"\n <|body_1|>\n\n def do(self):\n \"\"\"Method to invoke On command on SDP Master. :param argin: None. return: None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.state_model.op_state in [DevState.FAULT, DevState.UNKNOWN]:\n tango.Except.throw_exception(f'Command TelescopeOn is not allowed in current state {self.state_model.op_state}.', 'Failed to invoke On command on CspMasterLeafNode.', 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n this_server = TangoServerHelper.get_instance()\n if event.err:\n log_msg = f'{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}'\n self.logger.error(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n else:\n log_msg = f'{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}'\n self.logger.info(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n<|end_body_1|>\n\n<|body_start_2|>\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = ''\n property_val = this_server.read_property('SdpMasterFQDN')[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(const.CMD_ON, None, self.telescopeon_cmd_ended_cb)\n log_msg = const.STR_ON_CMD_SUCCESS\n self.logger.debug(log_msg)\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f'{const.ERR_ON_CMD_FAIL}{dev_failed}'\n tango.Except.re_throw_exception(dev_failed, const.ERR_INVOKING_CMD, log_msg, 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000188", "length_bytes": 3987, "license_type": "permissive", "methods": [{"docstring": "Checks whether this command is allowed to be run in current device state :return: True if this command is allowed to be run in current device state :rtype: boolean :raises: DevFailed if this command is not allowed to be run in current device state", "name": "check_allowed", "signature": "def check_allowed(self)"}, {"docstring": "Callback function immediately executed when the asynchronous invoked command returns. Checks whether the On command has been successfully invoked on SDP Master. :param event: a CmdDoneEvent object. This class is used to pass data to the callback method in asynchronous callback model for command execution. :type: CmdDoneEvent object It has the following members: - device : (DeviceProxy) The DeviceProxy object on which the call was executed. - cmd_name : (str) The command name - argout_raw : (DeviceData) The command argout - argout : The command argout - err : (bool) A boolean flag set to true if the command failed. False otherwise - errors : (sequence) The error stack - ext :return:", "name": "telescopeon_cmd_ended_cb", "signature": "def telescopeon_cmd_ended_cb(self, event)"}, {"docstring": "Method to invoke On command on SDP Master. :param argin: None. return: None", "name": "do", "signature": "def do(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_048434", "prompt": "Implement the Python class `TelescopeOn` described below.\n\nClass description:\nA class for TelescopeOn() command of SDP Master Leaf Node. TelescopeOn command is inherited from BaseCommand. Informs the SDP that it can start executing Processing Blocks. Sets the State to ON.\n\nMethod signatures and docstrings:\n- def check_allowed(self): Checks whether this command is allowed to be run in current device state :return: True if this command is allowed to be run in current device state :rtype: boolean :raises: DevFailed if this command is not allowed to be run in current device state\n- def telescopeon_cmd_ended_cb(self, event): Callback function immediately executed when the asynchronous invoked command returns. Checks whether the On command has been successfully invoked on SDP Master. :param event: a CmdDoneEvent object. This class is used to pass data to the callback method in asynchronous callback model for command execution. :type: CmdDoneEvent object It has the following members: - device : (DeviceProxy) The DeviceProxy object on which the call was executed. - cmd_name : (str) The command name - argout_raw : (DeviceData) The command argout - argout : The command argout - err : (bool) A boolean flag set to true if the command failed. False otherwise - errors : (sequence) The error stack - ext :return:\n- def do(self): Method to invoke On command on SDP Master. :param argin: None. return: None", "prompted_full_text": "Implement the Python class `TelescopeOn` described below.\n\nClass description:\nA class for TelescopeOn() command of SDP Master Leaf Node. TelescopeOn command is inherited from BaseCommand. Informs the SDP that it can start executing Processing Blocks. Sets the State to ON.\n\nMethod signatures and docstrings:\n- def check_allowed(self): Checks whether this command is allowed to be run in current device state :return: True if this command is allowed to be run in current device state :rtype: boolean :raises: DevFailed if this command is not allowed to be run in current device state\n- def telescopeon_cmd_ended_cb(self, event): Callback function immediately executed when the asynchronous invoked command returns. Checks whether the On command has been successfully invoked on SDP Master. :param event: a CmdDoneEvent object. This class is used to pass data to the callback method in asynchronous callback model for command execution. :type: CmdDoneEvent object It has the following members: - device : (DeviceProxy) The DeviceProxy object on which the call was executed. - cmd_name : (str) The command name - argout_raw : (DeviceData) The command argout - argout : The command argout - err : (bool) A boolean flag set to true if the command failed. False otherwise - errors : (sequence) The error stack - ext :return:\n- def do(self): Method to invoke On command on SDP Master. :param argin: None. return: None\n\n<|skeleton|>\nclass TelescopeOn:\n \"\"\"A class for TelescopeOn() command of SDP Master Leaf Node. TelescopeOn command is inherited from BaseCommand. Informs the SDP that it can start executing Processing Blocks. Sets the State to ON.\"\"\"\n\n def check_allowed(self):\n \"\"\"Checks whether this command is allowed to be run in current device state :return: True if this command is allowed to be run in current device state :rtype: boolean :raises: DevFailed if this command is not allowed to be run in current device state\"\"\"\n <|body_0|>\n\n def telescopeon_cmd_ended_cb(self, event):\n \"\"\"Callback function immediately executed when the asynchronous invoked command returns. Checks whether the On command has been successfully invoked on SDP Master. :param event: a CmdDoneEvent object. This class is used to pass data to the callback method in asynchronous callback model for command execution. :type: CmdDoneEvent object It has the following members: - device : (DeviceProxy) The DeviceProxy object on which the call was executed. - cmd_name : (str) The command name - argout_raw : (DeviceData) The command argout - argout : The command argout - err : (bool) A boolean flag set to true if the command failed. False otherwise - errors : (sequence) The error stack - ext :return:\"\"\"\n <|body_1|>\n\n def do(self):\n \"\"\"Method to invoke On command on SDP Master. :param argin: None. return: None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.state_model.op_state in [DevState.FAULT, DevState.UNKNOWN]:\n tango.Except.throw_exception(f'Command TelescopeOn is not allowed in current state {self.state_model.op_state}.', 'Failed to invoke On command on CspMasterLeafNode.', 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n this_server = TangoServerHelper.get_instance()\n if event.err:\n log_msg = f'{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}'\n self.logger.error(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n else:\n log_msg = f'{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}'\n self.logger.info(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n<|end_body_1|>\n\n<|body_start_2|>\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = ''\n property_val = this_server.read_property('SdpMasterFQDN')[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(const.CMD_ON, None, self.telescopeon_cmd_ended_cb)\n log_msg = const.STR_ON_CMD_SUCCESS\n self.logger.debug(log_msg)\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f'{const.ERR_ON_CMD_FAIL}{dev_failed}'\n tango.Except.re_throw_exception(dev_failed, const.ERR_INVOKING_CMD, log_msg, 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n<|end_body_2|>\n", "revision_id": "7ee65a9c8dada9b28893144b372a398bd0646195", "skeleton": "<|skeleton|>\nclass TelescopeOn:\n \"\"\"A class for TelescopeOn() command of SDP Master Leaf Node. TelescopeOn command is inherited from BaseCommand. Informs the SDP that it can start executing Processing Blocks. Sets the State to ON.\"\"\"\n\n def check_allowed(self):\n \"\"\"Checks whether this command is allowed to be run in current device state :return: True if this command is allowed to be run in current device state :rtype: boolean :raises: DevFailed if this command is not allowed to be run in current device state\"\"\"\n <|body_0|>\n\n def telescopeon_cmd_ended_cb(self, event):\n \"\"\"Callback function immediately executed when the asynchronous invoked command returns. Checks whether the On command has been successfully invoked on SDP Master. :param event: a CmdDoneEvent object. This class is used to pass data to the callback method in asynchronous callback model for command execution. :type: CmdDoneEvent object It has the following members: - device : (DeviceProxy) The DeviceProxy object on which the call was executed. - cmd_name : (str) The command name - argout_raw : (DeviceData) The command argout - argout : The command argout - err : (bool) A boolean flag set to true if the command failed. False otherwise - errors : (sequence) The error stack - ext :return:\"\"\"\n <|body_1|>\n\n def do(self):\n \"\"\"Method to invoke On command on SDP Master. :param argin: None. return: None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TelescopeOn:\n \"\"\"A class for TelescopeOn() command of SDP Master Leaf Node. TelescopeOn command is inherited from BaseCommand. Informs the SDP that it can start executing Processing Blocks. Sets the State to ON.\"\"\"\n\n def check_allowed(self):\n \"\"\"Checks whether this command is allowed to be run in current device state :return: True if this command is allowed to be run in current device state :rtype: boolean :raises: DevFailed if this command is not allowed to be run in current device state\"\"\"\n if self.state_model.op_state in [DevState.FAULT, DevState.UNKNOWN]:\n tango.Except.throw_exception(f'Command TelescopeOn is not allowed in current state {self.state_model.op_state}.', 'Failed to invoke On command on CspMasterLeafNode.', 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n return True\n\n def telescopeon_cmd_ended_cb(self, event):\n \"\"\"Callback function immediately executed when the asynchronous invoked command returns. Checks whether the On command has been successfully invoked on SDP Master. :param event: a CmdDoneEvent object. This class is used to pass data to the callback method in asynchronous callback model for command execution. :type: CmdDoneEvent object It has the following members: - device : (DeviceProxy) The DeviceProxy object on which the call was executed. - cmd_name : (str) The command name - argout_raw : (DeviceData) The command argout - argout : The command argout - err : (bool) A boolean flag set to true if the command failed. False otherwise - errors : (sequence) The error stack - ext :return:\"\"\"\n this_server = TangoServerHelper.get_instance()\n if event.err:\n log_msg = f'{const.ERR_INVOKING_CMD}{event.cmd_name}\\n{event.errors}'\n self.logger.error(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n else:\n log_msg = f'{const.STR_COMMAND}{event.cmd_name}{const.STR_INVOKE_SUCCESS}'\n self.logger.info(log_msg)\n this_server.write_attr('activityMessage', log_msg, False)\n\n def do(self):\n \"\"\"Method to invoke On command on SDP Master. :param argin: None. return: None\"\"\"\n this_server = TangoServerHelper.get_instance()\n try:\n sdp_master_ln_fqdn = ''\n property_val = this_server.read_property('SdpMasterFQDN')[0]\n sdp_master_ln_fqdn = sdp_master_ln_fqdn.join(property_val)\n sdp_mln_client_obj = TangoClient(sdp_master_ln_fqdn)\n sdp_mln_client_obj.send_command_async(const.CMD_ON, None, self.telescopeon_cmd_ended_cb)\n log_msg = const.STR_ON_CMD_SUCCESS\n self.logger.debug(log_msg)\n except DevFailed as dev_failed:\n self.logger.exception(dev_failed)\n log_msg = f'{const.ERR_ON_CMD_FAIL}{dev_failed}'\n tango.Except.re_throw_exception(dev_failed, const.ERR_INVOKING_CMD, log_msg, 'SdpMasterLeafNode.TelescopeOn()', tango.ErrSeverity.ERR)\n", "source": "the_stack_v2_python_sparse", "source_path": "temp_src/ska_tmc_sdpmasterleafnode_mid/telescope_on_command.py", "source_repo": "ska-telescope/tmc-prototype", "split": "test", "star_events_count": 4} {"blob_id": "32d7c17c0c078c94275936f6d76d0cd8567c8d05", "bodies": ["self.path = path\nself.log = log\nself.remesh = ' -2 format vtk -o SurfaceMesh.vtk Remeshing.geo > remesh.log'\nself.extrude = ' -3 format vtk -o SolidMesh.vtk Extrude.geo > extrude.log'\nif _platform == 'linux' or _platform == 'linux2':\n self.process = './gmsh'\nelif _platform == 'darwin':\n self.process = 'gmsh'\nelif _platform == 'win32':\n self.process = 'gmsh.exe'", "workingPath = os.getcwd()\nos.chdir(self.path)\nself.log.logMessInfo('GMSH COMMAND : ' + self.process + self.remesh)\np = subprocess.Popen(self.process + self.remesh, stdin=None, stdout=None, shell=True)\np.wait()\nos.chdir(workingPath)", "try:\n os.remove(os.path.join(self.path, 'Surface.stl'))\nexcept:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : Surface.stl')\ntry:\n os.remove(os.path.join(self.path, 'SurfaceMesh.vtk'))\nexcept:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SurfaceMesh.vtk')\ntry:\n os.remove(os.path.join(self.path, 'SolidMesh.vtk'))\nexcept:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SolidMesh.vtk')", "workingPath = os.getcwd()\nos.chdir(self.path)\nself.log.logMessInfo('GMSH COMMAND : ' + self.process + self.extrude)\np = subprocess.Popen(self.process + self.extrude, stdin=None, stdout=None, shell=True)\np.wait()\nos.chdir(workingPath)"], "bodies_text": "<|body_start_0|>\n self.path = path\n self.log = log\n self.remesh = ' -2 format vtk -o SurfaceMesh.vtk Remeshing.geo > remesh.log'\n self.extrude = ' -3 format vtk -o SolidMesh.vtk Extrude.geo > extrude.log'\n if _platform == 'linux' or _platform == 'linux2':\n self.process = './gmsh'\n elif _platform == 'darwin':\n self.process = 'gmsh'\n elif _platform == 'win32':\n self.process = 'gmsh.exe'\n<|end_body_0|>\n\n<|body_start_1|>\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.remesh)\n p = subprocess.Popen(self.process + self.remesh, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n os.remove(os.path.join(self.path, 'Surface.stl'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : Surface.stl')\n try:\n os.remove(os.path.join(self.path, 'SurfaceMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SurfaceMesh.vtk')\n try:\n os.remove(os.path.join(self.path, 'SolidMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SolidMesh.vtk')\n<|end_body_2|>\n\n<|body_start_3|>\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.extrude)\n p = subprocess.Popen(self.process + self.extrude, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n<|end_body_3|>\n", "class_docstring": "Fonction pour utiliser les routines gmsh de l'exterieur", "class_name": "gmshUtility", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass gmshUtility:\n \"\"\"Fonction pour utiliser les routines gmsh de l'exterieur\"\"\"\n\n def __init__(self, path, log):\n \"\"\"Initialisation de la classe\"\"\"\n <|body_0|>\n\n def Remeshing(self):\n \"\"\"Remeshing with gmsh\"\"\"\n <|body_1|>\n\n def remove_Mesh(self):\n \"\"\"Suppression des fichiers temporaires du maillage\"\"\"\n <|body_2|>\n\n def Extrude(self):\n \"\"\"Extrude mesh from surface\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.path = path\n self.log = log\n self.remesh = ' -2 format vtk -o SurfaceMesh.vtk Remeshing.geo > remesh.log'\n self.extrude = ' -3 format vtk -o SolidMesh.vtk Extrude.geo > extrude.log'\n if _platform == 'linux' or _platform == 'linux2':\n self.process = './gmsh'\n elif _platform == 'darwin':\n self.process = 'gmsh'\n elif _platform == 'win32':\n self.process = 'gmsh.exe'\n<|end_body_0|>\n\n<|body_start_1|>\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.remesh)\n p = subprocess.Popen(self.process + self.remesh, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n os.remove(os.path.join(self.path, 'Surface.stl'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : Surface.stl')\n try:\n os.remove(os.path.join(self.path, 'SurfaceMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SurfaceMesh.vtk')\n try:\n os.remove(os.path.join(self.path, 'SolidMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SolidMesh.vtk')\n<|end_body_2|>\n\n<|body_start_3|>\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.extrude)\n p = subprocess.Popen(self.process + self.extrude, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000189", "length_bytes": 4012, "license_type": "no_license", "methods": [{"docstring": "Initialisation de la classe", "name": "__init__", "signature": "def __init__(self, path, log)"}, {"docstring": "Remeshing with gmsh", "name": "Remeshing", "signature": "def Remeshing(self)"}, {"docstring": "Suppression des fichiers temporaires du maillage", "name": "remove_Mesh", "signature": "def remove_Mesh(self)"}, {"docstring": "Extrude mesh from surface", "name": "Extrude", "signature": "def Extrude(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_032018", "prompt": "Implement the Python class `gmshUtility` described below.\n\nClass description:\nFonction pour utiliser les routines gmsh de l'exterieur\n\nMethod signatures and docstrings:\n- def __init__(self, path, log): Initialisation de la classe\n- def Remeshing(self): Remeshing with gmsh\n- def remove_Mesh(self): Suppression des fichiers temporaires du maillage\n- def Extrude(self): Extrude mesh from surface", "prompted_full_text": "Implement the Python class `gmshUtility` described below.\n\nClass description:\nFonction pour utiliser les routines gmsh de l'exterieur\n\nMethod signatures and docstrings:\n- def __init__(self, path, log): Initialisation de la classe\n- def Remeshing(self): Remeshing with gmsh\n- def remove_Mesh(self): Suppression des fichiers temporaires du maillage\n- def Extrude(self): Extrude mesh from surface\n\n<|skeleton|>\nclass gmshUtility:\n \"\"\"Fonction pour utiliser les routines gmsh de l'exterieur\"\"\"\n\n def __init__(self, path, log):\n \"\"\"Initialisation de la classe\"\"\"\n <|body_0|>\n\n def Remeshing(self):\n \"\"\"Remeshing with gmsh\"\"\"\n <|body_1|>\n\n def remove_Mesh(self):\n \"\"\"Suppression des fichiers temporaires du maillage\"\"\"\n <|body_2|>\n\n def Extrude(self):\n \"\"\"Extrude mesh from surface\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.path = path\n self.log = log\n self.remesh = ' -2 format vtk -o SurfaceMesh.vtk Remeshing.geo > remesh.log'\n self.extrude = ' -3 format vtk -o SolidMesh.vtk Extrude.geo > extrude.log'\n if _platform == 'linux' or _platform == 'linux2':\n self.process = './gmsh'\n elif _platform == 'darwin':\n self.process = 'gmsh'\n elif _platform == 'win32':\n self.process = 'gmsh.exe'\n<|end_body_0|>\n\n<|body_start_1|>\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.remesh)\n p = subprocess.Popen(self.process + self.remesh, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n os.remove(os.path.join(self.path, 'Surface.stl'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : Surface.stl')\n try:\n os.remove(os.path.join(self.path, 'SurfaceMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SurfaceMesh.vtk')\n try:\n os.remove(os.path.join(self.path, 'SolidMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SolidMesh.vtk')\n<|end_body_2|>\n\n<|body_start_3|>\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.extrude)\n p = subprocess.Popen(self.process + self.extrude, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n<|end_body_3|>\n", "revision_id": "1bcb0dc067990660b1c180596a16bae8fcdeb17c", "skeleton": "<|skeleton|>\nclass gmshUtility:\n \"\"\"Fonction pour utiliser les routines gmsh de l'exterieur\"\"\"\n\n def __init__(self, path, log):\n \"\"\"Initialisation de la classe\"\"\"\n <|body_0|>\n\n def Remeshing(self):\n \"\"\"Remeshing with gmsh\"\"\"\n <|body_1|>\n\n def remove_Mesh(self):\n \"\"\"Suppression des fichiers temporaires du maillage\"\"\"\n <|body_2|>\n\n def Extrude(self):\n \"\"\"Extrude mesh from surface\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class gmshUtility:\n \"\"\"Fonction pour utiliser les routines gmsh de l'exterieur\"\"\"\n\n def __init__(self, path, log):\n \"\"\"Initialisation de la classe\"\"\"\n self.path = path\n self.log = log\n self.remesh = ' -2 format vtk -o SurfaceMesh.vtk Remeshing.geo > remesh.log'\n self.extrude = ' -3 format vtk -o SolidMesh.vtk Extrude.geo > extrude.log'\n if _platform == 'linux' or _platform == 'linux2':\n self.process = './gmsh'\n elif _platform == 'darwin':\n self.process = 'gmsh'\n elif _platform == 'win32':\n self.process = 'gmsh.exe'\n\n def Remeshing(self):\n \"\"\"Remeshing with gmsh\"\"\"\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.remesh)\n p = subprocess.Popen(self.process + self.remesh, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n\n def remove_Mesh(self):\n \"\"\"Suppression des fichiers temporaires du maillage\"\"\"\n try:\n os.remove(os.path.join(self.path, 'Surface.stl'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : Surface.stl')\n try:\n os.remove(os.path.join(self.path, 'SurfaceMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SurfaceMesh.vtk')\n try:\n os.remove(os.path.join(self.path, 'SolidMesh.vtk'))\n except:\n self.log.logMessInfo('GMSH PROBLEM : Inexist file : SolidMesh.vtk')\n\n def Extrude(self):\n \"\"\"Extrude mesh from surface\"\"\"\n workingPath = os.getcwd()\n os.chdir(self.path)\n self.log.logMessInfo('GMSH COMMAND : ' + self.process + self.extrude)\n p = subprocess.Popen(self.process + self.extrude, stdin=None, stdout=None, shell=True)\n p.wait()\n os.chdir(workingPath)\n", "source": "the_stack_v2_python_sparse", "source_path": "gmshUtility.py", "source_repo": "riccardoferrara/dcm2stl", "split": "test", "star_events_count": 0} {"blob_id": "9f2c163abd4552f69640a8555e9153232a2cdc50", "bodies": ["try:\n if os.path.isfile(filePath):\n file = open(filePath, 'r+')\n fileContents = file.read()\n if not fileContents:\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n else:\n file = open(filePath, 'w')\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\nexcept Exception as e:\n print('An error occurred:', e)", "try:\n file = open(filePath, 'r')\n for line in file.readlines():\n strData = line\n key = strData.split(',')[0].strip()\n value = strData.split(',')[1].strip()\n dictionary[key] = value\n file.close()\nexcept Exception as e:\n print('An error occurred:', e)", "for key, value in dictionary.items():\n dictRow = {key: value}\n list.append(dictRow)\nreturn list", "newDictRow = {addTask: addPriority}\nlist.append(newDictRow)\nlastRow = list[-1]\nif newDictRow == lastRow:\n isAdded = True\nelse:\n isAdded = False\nreturn isAdded", "for row in list:\n if deleteTask in row:\n list.remove(row)\n isFound = True\n else:\n isFound = False\nreturn isFound", "try:\n file = open(filePath, 'w')\n for row in list:\n strData = str(row).strip('[{}]').replace(\"'\", '').replace(':', ',')\n file.write(strData + '\\n')\n file.close()\n isSaved = True\nexcept Exception as e:\n print('An error occurred:', e)\n isSaved = False\nreturn isSaved"], "bodies_text": "<|body_start_0|>\n try:\n if os.path.isfile(filePath):\n file = open(filePath, 'r+')\n fileContents = file.read()\n if not fileContents:\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n else:\n file = open(filePath, 'w')\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n file = open(filePath, 'r')\n for line in file.readlines():\n strData = line\n key = strData.split(',')[0].strip()\n value = strData.split(',')[1].strip()\n dictionary[key] = value\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n<|end_body_1|>\n\n<|body_start_2|>\n for key, value in dictionary.items():\n dictRow = {key: value}\n list.append(dictRow)\n return list\n<|end_body_2|>\n\n<|body_start_3|>\n newDictRow = {addTask: addPriority}\n list.append(newDictRow)\n lastRow = list[-1]\n if newDictRow == lastRow:\n isAdded = True\n else:\n isAdded = False\n return isAdded\n<|end_body_3|>\n\n<|body_start_4|>\n for row in list:\n if deleteTask in row:\n list.remove(row)\n isFound = True\n else:\n isFound = False\n return isFound\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n file = open(filePath, 'w')\n for row in list:\n strData = str(row).strip('[{}]').replace(\"'\", '').replace(':', ',')\n file.write(strData + '\\n')\n file.close()\n isSaved = True\n except Exception as e:\n print('An error occurred:', e)\n isSaved = False\n return isSaved\n<|end_body_5|>\n", "class_docstring": "", "class_name": "ProcessFile", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProcessFile:\n\n def CreateFile(filePath):\n \"\"\"Creates a file called Todo.txt at filePath if the file doesn't exist. Writes two tasks and priorities to the file if the file contents are empty. :param file - The file at path filePath opened in read or write format :param filePath - The path of the text file Todo.txt :param fileContents - A variable to hold the file contents as Todo.txt is read.\"\"\"\n <|body_0|>\n\n def LoadData(filePath, dictionary):\n \"\"\"Reads the data from Todo.txt and loads it into a Python dictionary. :param filePath - The path of the text file Todo.txt. :param file - The file at path filePath opened in read format :param strData - A variable to contain each line of data in filePath as the file is read. :param key - The key of a particular dictionary row which is the task. :param value - The value of a particular dictionary row, which is the priority of the task. :param dictionary - The dictionary data structure used to hold tasks and their priorities.\"\"\"\n <|body_1|>\n\n def MoveToList(dictionary, list):\n \"\"\"Add dictionary contents to a list. :param dictionary - The dictionary data structure used to hold tasks and their priorities. :param dictRow - A particular dictionary row that will be added to the list. :return list - The list data structure used to hold the dictionary keys and values in a tabular format.\"\"\"\n <|body_2|>\n\n def AddTask(list, addTask, addPriority):\n \"\"\"Add task and priority to list. :param addTask - The new task (dictionary key) that will be added. :param addPriority - The priority (dictionary value) for newTask that will be added. :param newDictRow - A new dictionary row holding newTask and newPriority that will be added to the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param lastRow - The last row in the list. :return isAdded - A boolean variable used to indicate whether the new task was added or not to the list.\"\"\"\n <|body_3|>\n\n def DeleteTask(list, deleteTask):\n \"\"\"Remove a task and priority from the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param deleteTask - The dictionary key representing the task that will be removed from the list. :return isFound - A boolean variable used to indicate whether deleteTask is found in the list or not\"\"\"\n <|body_4|>\n\n def SaveFile(filePath, list):\n \"\"\"Saves the list with its dictionary contents to Todo.txt. :param file - The file at path filePath opened in write format. :param filePath - The path of the text file Todo.txt. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param strData - A single row in list converted to the string format that will be written to the text file. :return isSaved - A boolean variable used to indicate whether the file was successfully saved or not.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n if os.path.isfile(filePath):\n file = open(filePath, 'r+')\n fileContents = file.read()\n if not fileContents:\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n else:\n file = open(filePath, 'w')\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n file = open(filePath, 'r')\n for line in file.readlines():\n strData = line\n key = strData.split(',')[0].strip()\n value = strData.split(',')[1].strip()\n dictionary[key] = value\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n<|end_body_1|>\n\n<|body_start_2|>\n for key, value in dictionary.items():\n dictRow = {key: value}\n list.append(dictRow)\n return list\n<|end_body_2|>\n\n<|body_start_3|>\n newDictRow = {addTask: addPriority}\n list.append(newDictRow)\n lastRow = list[-1]\n if newDictRow == lastRow:\n isAdded = True\n else:\n isAdded = False\n return isAdded\n<|end_body_3|>\n\n<|body_start_4|>\n for row in list:\n if deleteTask in row:\n list.remove(row)\n isFound = True\n else:\n isFound = False\n return isFound\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n file = open(filePath, 'w')\n for row in list:\n strData = str(row).strip('[{}]').replace(\"'\", '').replace(':', ',')\n file.write(strData + '\\n')\n file.close()\n isSaved = True\n except Exception as e:\n print('An error occurred:', e)\n isSaved = False\n return isSaved\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000190", "length_bytes": 12114, "license_type": "no_license", "methods": [{"docstring": "Creates a file called Todo.txt at filePath if the file doesn't exist. Writes two tasks and priorities to the file if the file contents are empty. :param file - The file at path filePath opened in read or write format :param filePath - The path of the text file Todo.txt :param fileContents - A variable to hold the file contents as Todo.txt is read.", "name": "CreateFile", "signature": "def CreateFile(filePath)"}, {"docstring": "Reads the data from Todo.txt and loads it into a Python dictionary. :param filePath - The path of the text file Todo.txt. :param file - The file at path filePath opened in read format :param strData - A variable to contain each line of data in filePath as the file is read. :param key - The key of a particular dictionary row which is the task. :param value - The value of a particular dictionary row, which is the priority of the task. :param dictionary - The dictionary data structure used to hold tasks and their priorities.", "name": "LoadData", "signature": "def LoadData(filePath, dictionary)"}, {"docstring": "Add dictionary contents to a list. :param dictionary - The dictionary data structure used to hold tasks and their priorities. :param dictRow - A particular dictionary row that will be added to the list. :return list - The list data structure used to hold the dictionary keys and values in a tabular format.", "name": "MoveToList", "signature": "def MoveToList(dictionary, list)"}, {"docstring": "Add task and priority to list. :param addTask - The new task (dictionary key) that will be added. :param addPriority - The priority (dictionary value) for newTask that will be added. :param newDictRow - A new dictionary row holding newTask and newPriority that will be added to the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param lastRow - The last row in the list. :return isAdded - A boolean variable used to indicate whether the new task was added or not to the list.", "name": "AddTask", "signature": "def AddTask(list, addTask, addPriority)"}, {"docstring": "Remove a task and priority from the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param deleteTask - The dictionary key representing the task that will be removed from the list. :return isFound - A boolean variable used to indicate whether deleteTask is found in the list or not", "name": "DeleteTask", "signature": "def DeleteTask(list, deleteTask)"}, {"docstring": "Saves the list with its dictionary contents to Todo.txt. :param file - The file at path filePath opened in write format. :param filePath - The path of the text file Todo.txt. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param strData - A single row in list converted to the string format that will be written to the text file. :return isSaved - A boolean variable used to indicate whether the file was successfully saved or not.", "name": "SaveFile", "signature": "def SaveFile(filePath, list)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_009016", "prompt": "Implement the Python class `ProcessFile` described below.\n\nClass description:\nImplement the ProcessFile class.\n\nMethod signatures and docstrings:\n- def CreateFile(filePath): Creates a file called Todo.txt at filePath if the file doesn't exist. Writes two tasks and priorities to the file if the file contents are empty. :param file - The file at path filePath opened in read or write format :param filePath - The path of the text file Todo.txt :param fileContents - A variable to hold the file contents as Todo.txt is read.\n- def LoadData(filePath, dictionary): Reads the data from Todo.txt and loads it into a Python dictionary. :param filePath - The path of the text file Todo.txt. :param file - The file at path filePath opened in read format :param strData - A variable to contain each line of data in filePath as the file is read. :param key - The key of a particular dictionary row which is the task. :param value - The value of a particular dictionary row, which is the priority of the task. :param dictionary - The dictionary data structure used to hold tasks and their priorities.\n- def MoveToList(dictionary, list): Add dictionary contents to a list. :param dictionary - The dictionary data structure used to hold tasks and their priorities. :param dictRow - A particular dictionary row that will be added to the list. :return list - The list data structure used to hold the dictionary keys and values in a tabular format.\n- def AddTask(list, addTask, addPriority): Add task and priority to list. :param addTask - The new task (dictionary key) that will be added. :param addPriority - The priority (dictionary value) for newTask that will be added. :param newDictRow - A new dictionary row holding newTask and newPriority that will be added to the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param lastRow - The last row in the list. :return isAdded - A boolean variable used to indicate whether the new task was added or not to the list.\n- def DeleteTask(list, deleteTask): Remove a task and priority from the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param deleteTask - The dictionary key representing the task that will be removed from the list. :return isFound - A boolean variable used to indicate whether deleteTask is found in the list or not\n- def SaveFile(filePath, list): Saves the list with its dictionary contents to Todo.txt. :param file - The file at path filePath opened in write format. :param filePath - The path of the text file Todo.txt. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param strData - A single row in list converted to the string format that will be written to the text file. :return isSaved - A boolean variable used to indicate whether the file was successfully saved or not.", "prompted_full_text": "Implement the Python class `ProcessFile` described below.\n\nClass description:\nImplement the ProcessFile class.\n\nMethod signatures and docstrings:\n- def CreateFile(filePath): Creates a file called Todo.txt at filePath if the file doesn't exist. Writes two tasks and priorities to the file if the file contents are empty. :param file - The file at path filePath opened in read or write format :param filePath - The path of the text file Todo.txt :param fileContents - A variable to hold the file contents as Todo.txt is read.\n- def LoadData(filePath, dictionary): Reads the data from Todo.txt and loads it into a Python dictionary. :param filePath - The path of the text file Todo.txt. :param file - The file at path filePath opened in read format :param strData - A variable to contain each line of data in filePath as the file is read. :param key - The key of a particular dictionary row which is the task. :param value - The value of a particular dictionary row, which is the priority of the task. :param dictionary - The dictionary data structure used to hold tasks and their priorities.\n- def MoveToList(dictionary, list): Add dictionary contents to a list. :param dictionary - The dictionary data structure used to hold tasks and their priorities. :param dictRow - A particular dictionary row that will be added to the list. :return list - The list data structure used to hold the dictionary keys and values in a tabular format.\n- def AddTask(list, addTask, addPriority): Add task and priority to list. :param addTask - The new task (dictionary key) that will be added. :param addPriority - The priority (dictionary value) for newTask that will be added. :param newDictRow - A new dictionary row holding newTask and newPriority that will be added to the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param lastRow - The last row in the list. :return isAdded - A boolean variable used to indicate whether the new task was added or not to the list.\n- def DeleteTask(list, deleteTask): Remove a task and priority from the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param deleteTask - The dictionary key representing the task that will be removed from the list. :return isFound - A boolean variable used to indicate whether deleteTask is found in the list or not\n- def SaveFile(filePath, list): Saves the list with its dictionary contents to Todo.txt. :param file - The file at path filePath opened in write format. :param filePath - The path of the text file Todo.txt. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param strData - A single row in list converted to the string format that will be written to the text file. :return isSaved - A boolean variable used to indicate whether the file was successfully saved or not.\n\n<|skeleton|>\nclass ProcessFile:\n\n def CreateFile(filePath):\n \"\"\"Creates a file called Todo.txt at filePath if the file doesn't exist. Writes two tasks and priorities to the file if the file contents are empty. :param file - The file at path filePath opened in read or write format :param filePath - The path of the text file Todo.txt :param fileContents - A variable to hold the file contents as Todo.txt is read.\"\"\"\n <|body_0|>\n\n def LoadData(filePath, dictionary):\n \"\"\"Reads the data from Todo.txt and loads it into a Python dictionary. :param filePath - The path of the text file Todo.txt. :param file - The file at path filePath opened in read format :param strData - A variable to contain each line of data in filePath as the file is read. :param key - The key of a particular dictionary row which is the task. :param value - The value of a particular dictionary row, which is the priority of the task. :param dictionary - The dictionary data structure used to hold tasks and their priorities.\"\"\"\n <|body_1|>\n\n def MoveToList(dictionary, list):\n \"\"\"Add dictionary contents to a list. :param dictionary - The dictionary data structure used to hold tasks and their priorities. :param dictRow - A particular dictionary row that will be added to the list. :return list - The list data structure used to hold the dictionary keys and values in a tabular format.\"\"\"\n <|body_2|>\n\n def AddTask(list, addTask, addPriority):\n \"\"\"Add task and priority to list. :param addTask - The new task (dictionary key) that will be added. :param addPriority - The priority (dictionary value) for newTask that will be added. :param newDictRow - A new dictionary row holding newTask and newPriority that will be added to the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param lastRow - The last row in the list. :return isAdded - A boolean variable used to indicate whether the new task was added or not to the list.\"\"\"\n <|body_3|>\n\n def DeleteTask(list, deleteTask):\n \"\"\"Remove a task and priority from the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param deleteTask - The dictionary key representing the task that will be removed from the list. :return isFound - A boolean variable used to indicate whether deleteTask is found in the list or not\"\"\"\n <|body_4|>\n\n def SaveFile(filePath, list):\n \"\"\"Saves the list with its dictionary contents to Todo.txt. :param file - The file at path filePath opened in write format. :param filePath - The path of the text file Todo.txt. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param strData - A single row in list converted to the string format that will be written to the text file. :return isSaved - A boolean variable used to indicate whether the file was successfully saved or not.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n if os.path.isfile(filePath):\n file = open(filePath, 'r+')\n fileContents = file.read()\n if not fileContents:\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n else:\n file = open(filePath, 'w')\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n file = open(filePath, 'r')\n for line in file.readlines():\n strData = line\n key = strData.split(',')[0].strip()\n value = strData.split(',')[1].strip()\n dictionary[key] = value\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n<|end_body_1|>\n\n<|body_start_2|>\n for key, value in dictionary.items():\n dictRow = {key: value}\n list.append(dictRow)\n return list\n<|end_body_2|>\n\n<|body_start_3|>\n newDictRow = {addTask: addPriority}\n list.append(newDictRow)\n lastRow = list[-1]\n if newDictRow == lastRow:\n isAdded = True\n else:\n isAdded = False\n return isAdded\n<|end_body_3|>\n\n<|body_start_4|>\n for row in list:\n if deleteTask in row:\n list.remove(row)\n isFound = True\n else:\n isFound = False\n return isFound\n<|end_body_4|>\n\n<|body_start_5|>\n try:\n file = open(filePath, 'w')\n for row in list:\n strData = str(row).strip('[{}]').replace(\"'\", '').replace(':', ',')\n file.write(strData + '\\n')\n file.close()\n isSaved = True\n except Exception as e:\n print('An error occurred:', e)\n isSaved = False\n return isSaved\n<|end_body_5|>\n", "revision_id": "efb86ed2394b1eded724638bb048eec7aab11819", "skeleton": "<|skeleton|>\nclass ProcessFile:\n\n def CreateFile(filePath):\n \"\"\"Creates a file called Todo.txt at filePath if the file doesn't exist. Writes two tasks and priorities to the file if the file contents are empty. :param file - The file at path filePath opened in read or write format :param filePath - The path of the text file Todo.txt :param fileContents - A variable to hold the file contents as Todo.txt is read.\"\"\"\n <|body_0|>\n\n def LoadData(filePath, dictionary):\n \"\"\"Reads the data from Todo.txt and loads it into a Python dictionary. :param filePath - The path of the text file Todo.txt. :param file - The file at path filePath opened in read format :param strData - A variable to contain each line of data in filePath as the file is read. :param key - The key of a particular dictionary row which is the task. :param value - The value of a particular dictionary row, which is the priority of the task. :param dictionary - The dictionary data structure used to hold tasks and their priorities.\"\"\"\n <|body_1|>\n\n def MoveToList(dictionary, list):\n \"\"\"Add dictionary contents to a list. :param dictionary - The dictionary data structure used to hold tasks and their priorities. :param dictRow - A particular dictionary row that will be added to the list. :return list - The list data structure used to hold the dictionary keys and values in a tabular format.\"\"\"\n <|body_2|>\n\n def AddTask(list, addTask, addPriority):\n \"\"\"Add task and priority to list. :param addTask - The new task (dictionary key) that will be added. :param addPriority - The priority (dictionary value) for newTask that will be added. :param newDictRow - A new dictionary row holding newTask and newPriority that will be added to the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param lastRow - The last row in the list. :return isAdded - A boolean variable used to indicate whether the new task was added or not to the list.\"\"\"\n <|body_3|>\n\n def DeleteTask(list, deleteTask):\n \"\"\"Remove a task and priority from the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param deleteTask - The dictionary key representing the task that will be removed from the list. :return isFound - A boolean variable used to indicate whether deleteTask is found in the list or not\"\"\"\n <|body_4|>\n\n def SaveFile(filePath, list):\n \"\"\"Saves the list with its dictionary contents to Todo.txt. :param file - The file at path filePath opened in write format. :param filePath - The path of the text file Todo.txt. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param strData - A single row in list converted to the string format that will be written to the text file. :return isSaved - A boolean variable used to indicate whether the file was successfully saved or not.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProcessFile:\n def CreateFile(filePath):\n \"\"\"Creates a file called Todo.txt at filePath if the file doesn't exist. Writes two tasks and priorities to the file if the file contents are empty. :param file - The file at path filePath opened in read or write format :param filePath - The path of the text file Todo.txt :param fileContents - A variable to hold the file contents as Todo.txt is read.\"\"\"\n try:\n if os.path.isfile(filePath):\n file = open(filePath, 'r+')\n fileContents = file.read()\n if not fileContents:\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n else:\n file = open(filePath, 'w')\n file.write('Task , Priority\\nClean House , low\\nPay Bills , high')\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n\n def LoadData(filePath, dictionary):\n \"\"\"Reads the data from Todo.txt and loads it into a Python dictionary. :param filePath - The path of the text file Todo.txt. :param file - The file at path filePath opened in read format :param strData - A variable to contain each line of data in filePath as the file is read. :param key - The key of a particular dictionary row which is the task. :param value - The value of a particular dictionary row, which is the priority of the task. :param dictionary - The dictionary data structure used to hold tasks and their priorities.\"\"\"\n try:\n file = open(filePath, 'r')\n for line in file.readlines():\n strData = line\n key = strData.split(',')[0].strip()\n value = strData.split(',')[1].strip()\n dictionary[key] = value\n file.close()\n except Exception as e:\n print('An error occurred:', e)\n\n def MoveToList(dictionary, list):\n \"\"\"Add dictionary contents to a list. :param dictionary - The dictionary data structure used to hold tasks and their priorities. :param dictRow - A particular dictionary row that will be added to the list. :return list - The list data structure used to hold the dictionary keys and values in a tabular format.\"\"\"\n for key, value in dictionary.items():\n dictRow = {key: value}\n list.append(dictRow)\n return list\n\n def AddTask(list, addTask, addPriority):\n \"\"\"Add task and priority to list. :param addTask - The new task (dictionary key) that will be added. :param addPriority - The priority (dictionary value) for newTask that will be added. :param newDictRow - A new dictionary row holding newTask and newPriority that will be added to the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param lastRow - The last row in the list. :return isAdded - A boolean variable used to indicate whether the new task was added or not to the list.\"\"\"\n newDictRow = {addTask: addPriority}\n list.append(newDictRow)\n lastRow = list[-1]\n if newDictRow == lastRow:\n isAdded = True\n else:\n isAdded = False\n return isAdded\n\n def DeleteTask(list, deleteTask):\n \"\"\"Remove a task and priority from the list. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param deleteTask - The dictionary key representing the task that will be removed from the list. :return isFound - A boolean variable used to indicate whether deleteTask is found in the list or not\"\"\"\n for row in list:\n if deleteTask in row:\n list.remove(row)\n isFound = True\n else:\n isFound = False\n return isFound\n\n def SaveFile(filePath, list):\n \"\"\"Saves the list with its dictionary contents to Todo.txt. :param file - The file at path filePath opened in write format. :param filePath - The path of the text file Todo.txt. :param list - The list data structure used to hold the dictionary keys and values in a tabular format. :param strData - A single row in list converted to the string format that will be written to the text file. :return isSaved - A boolean variable used to indicate whether the file was successfully saved or not.\"\"\"\n try:\n file = open(filePath, 'w')\n for row in list:\n strData = str(row).strip('[{}]').replace(\"'\", '').replace(':', ',')\n file.write(strData + '\\n')\n file.close()\n isSaved = True\n except Exception as e:\n print('An error occurred:', e)\n isSaved = False\n return isSaved\n", "source": "the_stack_v2_python_sparse", "source_path": "Python Static Methods/Static Methods.py", "source_repo": "KrisMichele/Python-Applications", "split": "test", "star_events_count": 0} {"blob_id": "9e5d51eb69414a9ebe68e9fc2c5728c25fb53117", "bodies": ["if not security_services.is_superuser(user_id):\n user_permissions = security_services.get_user_permissions(user_id)\n user_permission_ids = [p.id for p in user_permissions]\n _permission_ids = permission_ids\n if not isinstance(permission_ids, (list, tuple, set)):\n _permission_ids = [permission_ids]\n if not set(_permission_ids) <= set(user_permission_ids):\n message = _('The user[{user_name}] has not permission(s) {permission_ids}.')\n raise AuthorizationException(message.format(user_name=user_id, permission_ids=permission_ids))", "if user_id is None:\n user_id = session_services.get_current_user().id\ntry:\n self.authorize(user_id, permission_ids)\n return True\nexcept AuthorizationException:\n return False"], "bodies_text": "<|body_start_0|>\n if not security_services.is_superuser(user_id):\n user_permissions = security_services.get_user_permissions(user_id)\n user_permission_ids = [p.id for p in user_permissions]\n _permission_ids = permission_ids\n if not isinstance(permission_ids, (list, tuple, set)):\n _permission_ids = [permission_ids]\n if not set(_permission_ids) <= set(user_permission_ids):\n message = _('The user[{user_name}] has not permission(s) {permission_ids}.')\n raise AuthorizationException(message.format(user_name=user_id, permission_ids=permission_ids))\n<|end_body_0|>\n\n<|body_start_1|>\n if user_id is None:\n user_id = session_services.get_current_user().id\n try:\n self.authorize(user_id, permission_ids)\n return True\n except AuthorizationException:\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BaseAuthorizer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseAuthorizer:\n\n def authorize(self, user_id, permission_ids):\n \"\"\"Authorizes user for permissions. @param user_id: user ID @param permission_ids: list of permission ID\"\"\"\n <|body_0|>\n\n def is_in_role(self, permission_ids, user_id=None):\n \"\"\"Returns True if current user has permissions. @param permission_ids: list of permission ID @param user_id: user_id to check it's roles @return: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not security_services.is_superuser(user_id):\n user_permissions = security_services.get_user_permissions(user_id)\n user_permission_ids = [p.id for p in user_permissions]\n _permission_ids = permission_ids\n if not isinstance(permission_ids, (list, tuple, set)):\n _permission_ids = [permission_ids]\n if not set(_permission_ids) <= set(user_permission_ids):\n message = _('The user[{user_name}] has not permission(s) {permission_ids}.')\n raise AuthorizationException(message.format(user_name=user_id, permission_ids=permission_ids))\n<|end_body_0|>\n\n<|body_start_1|>\n if user_id is None:\n user_id = session_services.get_current_user().id\n try:\n self.authorize(user_id, permission_ids)\n return True\n except AuthorizationException:\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000191", "length_bytes": 1912, "license_type": "no_license", "methods": [{"docstring": "Authorizes user for permissions. @param user_id: user ID @param permission_ids: list of permission ID", "name": "authorize", "signature": "def authorize(self, user_id, permission_ids)"}, {"docstring": "Returns True if current user has permissions. @param permission_ids: list of permission ID @param user_id: user_id to check it's roles @return: bool", "name": "is_in_role", "signature": "def is_in_role(self, permission_ids, user_id=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_045745", "prompt": "Implement the Python class `BaseAuthorizer` described below.\n\nClass description:\nImplement the BaseAuthorizer class.\n\nMethod signatures and docstrings:\n- def authorize(self, user_id, permission_ids): Authorizes user for permissions. @param user_id: user ID @param permission_ids: list of permission ID\n- def is_in_role(self, permission_ids, user_id=None): Returns True if current user has permissions. @param permission_ids: list of permission ID @param user_id: user_id to check it's roles @return: bool", "prompted_full_text": "Implement the Python class `BaseAuthorizer` described below.\n\nClass description:\nImplement the BaseAuthorizer class.\n\nMethod signatures and docstrings:\n- def authorize(self, user_id, permission_ids): Authorizes user for permissions. @param user_id: user ID @param permission_ids: list of permission ID\n- def is_in_role(self, permission_ids, user_id=None): Returns True if current user has permissions. @param permission_ids: list of permission ID @param user_id: user_id to check it's roles @return: bool\n\n<|skeleton|>\nclass BaseAuthorizer:\n\n def authorize(self, user_id, permission_ids):\n \"\"\"Authorizes user for permissions. @param user_id: user ID @param permission_ids: list of permission ID\"\"\"\n <|body_0|>\n\n def is_in_role(self, permission_ids, user_id=None):\n \"\"\"Returns True if current user has permissions. @param permission_ids: list of permission ID @param user_id: user_id to check it's roles @return: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not security_services.is_superuser(user_id):\n user_permissions = security_services.get_user_permissions(user_id)\n user_permission_ids = [p.id for p in user_permissions]\n _permission_ids = permission_ids\n if not isinstance(permission_ids, (list, tuple, set)):\n _permission_ids = [permission_ids]\n if not set(_permission_ids) <= set(user_permission_ids):\n message = _('The user[{user_name}] has not permission(s) {permission_ids}.')\n raise AuthorizationException(message.format(user_name=user_id, permission_ids=permission_ids))\n<|end_body_0|>\n\n<|body_start_1|>\n if user_id is None:\n user_id = session_services.get_current_user().id\n try:\n self.authorize(user_id, permission_ids)\n return True\n except AuthorizationException:\n return False\n<|end_body_1|>\n", "revision_id": "a2ee333d2a4fe9821f3d24ee15d458f226ffcde5", "skeleton": "<|skeleton|>\nclass BaseAuthorizer:\n\n def authorize(self, user_id, permission_ids):\n \"\"\"Authorizes user for permissions. @param user_id: user ID @param permission_ids: list of permission ID\"\"\"\n <|body_0|>\n\n def is_in_role(self, permission_ids, user_id=None):\n \"\"\"Returns True if current user has permissions. @param permission_ids: list of permission ID @param user_id: user_id to check it's roles @return: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseAuthorizer:\n def authorize(self, user_id, permission_ids):\n \"\"\"Authorizes user for permissions. @param user_id: user ID @param permission_ids: list of permission ID\"\"\"\n if not security_services.is_superuser(user_id):\n user_permissions = security_services.get_user_permissions(user_id)\n user_permission_ids = [p.id for p in user_permissions]\n _permission_ids = permission_ids\n if not isinstance(permission_ids, (list, tuple, set)):\n _permission_ids = [permission_ids]\n if not set(_permission_ids) <= set(user_permission_ids):\n message = _('The user[{user_name}] has not permission(s) {permission_ids}.')\n raise AuthorizationException(message.format(user_name=user_id, permission_ids=permission_ids))\n\n def is_in_role(self, permission_ids, user_id=None):\n \"\"\"Returns True if current user has permissions. @param permission_ids: list of permission ID @param user_id: user_id to check it's roles @return: bool\"\"\"\n if user_id is None:\n user_id = session_services.get_current_user().id\n try:\n self.authorize(user_id, permission_ids)\n return True\n except AuthorizationException:\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "src/deltapy/security/authorization/authorizer.py", "source_repo": "hamed1361554/sportmagazine-server", "split": "test", "star_events_count": 0} {"blob_id": "68bc92141002ded7ffce105f0ada8ee1e9ee357c", "bodies": ["average = {}\ncarrier = self.helper(root, average)\nsol = len(carrier) * [0]\nfor k, v in carrier.iteritems():\n sol[k] = v['sum'] / float(v['num'])\nreturn sol", "if depth not in carrier:\n carrier[depth] = {}\n carrier[depth]['sum'] = root.val\n carrier[depth]['num'] = 1\nelse:\n carrier[depth]['sum'] += root.val\n carrier[depth]['num'] += 1\nif root.left:\n self.helper(root.left, carrier, depth + 1)\nif root.right:\n self.helper(root.right, carrier, depth + 1)\nif depth == 0:\n return carrier"], "bodies_text": "<|body_start_0|>\n average = {}\n carrier = self.helper(root, average)\n sol = len(carrier) * [0]\n for k, v in carrier.iteritems():\n sol[k] = v['sum'] / float(v['num'])\n return sol\n<|end_body_0|>\n\n<|body_start_1|>\n if depth not in carrier:\n carrier[depth] = {}\n carrier[depth]['sum'] = root.val\n carrier[depth]['num'] = 1\n else:\n carrier[depth]['sum'] += root.val\n carrier[depth]['num'] += 1\n if root.left:\n self.helper(root.left, carrier, depth + 1)\n if root.right:\n self.helper(root.right, carrier, depth + 1)\n if depth == 0:\n return carrier\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def averageOfLevels(self, root):\n \"\"\":type root: TreeNode :rtype: List[float]\"\"\"\n <|body_0|>\n\n def helper(self, root, carrier, depth=0):\n \"\"\":type root: TreeNode :type depth: int :rtype: Dictionary\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n average = {}\n carrier = self.helper(root, average)\n sol = len(carrier) * [0]\n for k, v in carrier.iteritems():\n sol[k] = v['sum'] / float(v['num'])\n return sol\n<|end_body_0|>\n\n<|body_start_1|>\n if depth not in carrier:\n carrier[depth] = {}\n carrier[depth]['sum'] = root.val\n carrier[depth]['num'] = 1\n else:\n carrier[depth]['sum'] += root.val\n carrier[depth]['num'] += 1\n if root.left:\n self.helper(root.left, carrier, depth + 1)\n if root.right:\n self.helper(root.right, carrier, depth + 1)\n if depth == 0:\n return carrier\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000192", "length_bytes": 1532, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: List[float]", "name": "averageOfLevels", "signature": "def averageOfLevels(self, root)"}, {"docstring": ":type root: TreeNode :type depth: int :rtype: Dictionary", "name": "helper", "signature": "def helper(self, root, carrier, depth=0)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_017361", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def averageOfLevels(self, root): :type root: TreeNode :rtype: List[float]\n- def helper(self, root, carrier, depth=0): :type root: TreeNode :type depth: int :rtype: Dictionary", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def averageOfLevels(self, root): :type root: TreeNode :rtype: List[float]\n- def helper(self, root, carrier, depth=0): :type root: TreeNode :type depth: int :rtype: Dictionary\n\n<|skeleton|>\nclass Solution:\n\n def averageOfLevels(self, root):\n \"\"\":type root: TreeNode :rtype: List[float]\"\"\"\n <|body_0|>\n\n def helper(self, root, carrier, depth=0):\n \"\"\":type root: TreeNode :type depth: int :rtype: Dictionary\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n average = {}\n carrier = self.helper(root, average)\n sol = len(carrier) * [0]\n for k, v in carrier.iteritems():\n sol[k] = v['sum'] / float(v['num'])\n return sol\n<|end_body_0|>\n\n<|body_start_1|>\n if depth not in carrier:\n carrier[depth] = {}\n carrier[depth]['sum'] = root.val\n carrier[depth]['num'] = 1\n else:\n carrier[depth]['sum'] += root.val\n carrier[depth]['num'] += 1\n if root.left:\n self.helper(root.left, carrier, depth + 1)\n if root.right:\n self.helper(root.right, carrier, depth + 1)\n if depth == 0:\n return carrier\n<|end_body_1|>\n", "revision_id": "61933e7c0b8d8ffef9bd9a4af4fddfdb77568b62", "skeleton": "<|skeleton|>\nclass Solution:\n\n def averageOfLevels(self, root):\n \"\"\":type root: TreeNode :rtype: List[float]\"\"\"\n <|body_0|>\n\n def helper(self, root, carrier, depth=0):\n \"\"\":type root: TreeNode :type depth: int :rtype: Dictionary\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def averageOfLevels(self, root):\n \"\"\":type root: TreeNode :rtype: List[float]\"\"\"\n average = {}\n carrier = self.helper(root, average)\n sol = len(carrier) * [0]\n for k, v in carrier.iteritems():\n sol[k] = v['sum'] / float(v['num'])\n return sol\n\n def helper(self, root, carrier, depth=0):\n \"\"\":type root: TreeNode :type depth: int :rtype: Dictionary\"\"\"\n if depth not in carrier:\n carrier[depth] = {}\n carrier[depth]['sum'] = root.val\n carrier[depth]['num'] = 1\n else:\n carrier[depth]['sum'] += root.val\n carrier[depth]['num'] += 1\n if root.left:\n self.helper(root.left, carrier, depth + 1)\n if root.right:\n self.helper(root.right, carrier, depth + 1)\n if depth == 0:\n return carrier\n", "source": "the_stack_v2_python_sparse", "source_path": "637-Average-of-Levels-in-Binary-Tree.py", "source_repo": "OhMesch/Algorithm-Problems", "split": "test", "star_events_count": 0} {"blob_id": "992d42cc66af8b2e1e5dc49afc2936e2f044ee3d", "bodies": ["self.com = PISM.Context().com\nself.rank = PISM.Context().rank\nself.log = ''\nself.filename = filename\nself.attr = attribute", "if self.rank == 0 and verbosity <= 2:\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n self.log = '%s%s: %s' % (self.log, timestamp, message)\n d = PISM.netCDF.Dataset(self.filename, 'a')\n d.__setattr__(self.attr, self.log)\n d.close()\nself.com.barrier()", "if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(self.filename, 'a')\n if self.attr in d.ncattrs():\n self.log += d.__getattr__(self.attr)\n d.close()\nself.com.barrier()", "if filename is None:\n filename = self.filename\nif attribute is None:\n attribute = self.attr\nif PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(filename, 'a')\n d.__setattr__(attribute, self.log)\n d.close()\nself.com.barrier()"], "bodies_text": "<|body_start_0|>\n self.com = PISM.Context().com\n self.rank = PISM.Context().rank\n self.log = ''\n self.filename = filename\n self.attr = attribute\n<|end_body_0|>\n\n<|body_start_1|>\n if self.rank == 0 and verbosity <= 2:\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n self.log = '%s%s: %s' % (self.log, timestamp, message)\n d = PISM.netCDF.Dataset(self.filename, 'a')\n d.__setattr__(self.attr, self.log)\n d.close()\n self.com.barrier()\n<|end_body_1|>\n\n<|body_start_2|>\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(self.filename, 'a')\n if self.attr in d.ncattrs():\n self.log += d.__getattr__(self.attr)\n d.close()\n self.com.barrier()\n<|end_body_2|>\n\n<|body_start_3|>\n if filename is None:\n filename = self.filename\n if attribute is None:\n attribute = self.attr\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(filename, 'a')\n d.__setattr__(attribute, self.log)\n d.close()\n self.com.barrier()\n<|end_body_3|>\n", "class_docstring": "Implements a logger that appends log messages as they occur to an attribute of an :file:`.nc` file.", "class_name": "CaptureLogger", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CaptureLogger:\n \"\"\"Implements a logger that appends log messages as they occur to an attribute of an :file:`.nc` file.\"\"\"\n\n def __init__(self, filename, attribute='pism_log'):\n \"\"\":param filename: Name of :file:`.nc` file to save the log to. :param attribute: Attribute name to save the log as.\"\"\"\n <|body_0|>\n\n def __call__(self, message, verbosity):\n \"\"\"Saves the message to our internal log string and writes the string out to the file.\"\"\"\n <|body_1|>\n\n def readOldLog(self):\n \"\"\"If the :file:`.nc` file we are logging to already has a log, read it in to the log we are about to make so that we append to it rather than overwriting it.\"\"\"\n <|body_2|>\n\n def write(self, filename=None, attribute=None):\n \"\"\"Save a copy of our log to the specified file and attribute.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.com = PISM.Context().com\n self.rank = PISM.Context().rank\n self.log = ''\n self.filename = filename\n self.attr = attribute\n<|end_body_0|>\n\n<|body_start_1|>\n if self.rank == 0 and verbosity <= 2:\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n self.log = '%s%s: %s' % (self.log, timestamp, message)\n d = PISM.netCDF.Dataset(self.filename, 'a')\n d.__setattr__(self.attr, self.log)\n d.close()\n self.com.barrier()\n<|end_body_1|>\n\n<|body_start_2|>\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(self.filename, 'a')\n if self.attr in d.ncattrs():\n self.log += d.__getattr__(self.attr)\n d.close()\n self.com.barrier()\n<|end_body_2|>\n\n<|body_start_3|>\n if filename is None:\n filename = self.filename\n if attribute is None:\n attribute = self.attr\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(filename, 'a')\n d.__setattr__(attribute, self.log)\n d.close()\n self.com.barrier()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000193", "length_bytes": 6632, "license_type": "no_license", "methods": [{"docstring": ":param filename: Name of :file:`.nc` file to save the log to. :param attribute: Attribute name to save the log as.", "name": "__init__", "signature": "def __init__(self, filename, attribute='pism_log')"}, {"docstring": "Saves the message to our internal log string and writes the string out to the file.", "name": "__call__", "signature": "def __call__(self, message, verbosity)"}, {"docstring": "If the :file:`.nc` file we are logging to already has a log, read it in to the log we are about to make so that we append to it rather than overwriting it.", "name": "readOldLog", "signature": "def readOldLog(self)"}, {"docstring": "Save a copy of our log to the specified file and attribute.", "name": "write", "signature": "def write(self, filename=None, attribute=None)"}], "n_methods": 4, "prompt": "Implement the Python class `CaptureLogger` described below.\n\nClass description:\nImplements a logger that appends log messages as they occur to an attribute of an :file:`.nc` file.\n\nMethod signatures and docstrings:\n- def __init__(self, filename, attribute='pism_log'): :param filename: Name of :file:`.nc` file to save the log to. :param attribute: Attribute name to save the log as.\n- def __call__(self, message, verbosity): Saves the message to our internal log string and writes the string out to the file.\n- def readOldLog(self): If the :file:`.nc` file we are logging to already has a log, read it in to the log we are about to make so that we append to it rather than overwriting it.\n- def write(self, filename=None, attribute=None): Save a copy of our log to the specified file and attribute.", "prompted_full_text": "Implement the Python class `CaptureLogger` described below.\n\nClass description:\nImplements a logger that appends log messages as they occur to an attribute of an :file:`.nc` file.\n\nMethod signatures and docstrings:\n- def __init__(self, filename, attribute='pism_log'): :param filename: Name of :file:`.nc` file to save the log to. :param attribute: Attribute name to save the log as.\n- def __call__(self, message, verbosity): Saves the message to our internal log string and writes the string out to the file.\n- def readOldLog(self): If the :file:`.nc` file we are logging to already has a log, read it in to the log we are about to make so that we append to it rather than overwriting it.\n- def write(self, filename=None, attribute=None): Save a copy of our log to the specified file and attribute.\n\n<|skeleton|>\nclass CaptureLogger:\n \"\"\"Implements a logger that appends log messages as they occur to an attribute of an :file:`.nc` file.\"\"\"\n\n def __init__(self, filename, attribute='pism_log'):\n \"\"\":param filename: Name of :file:`.nc` file to save the log to. :param attribute: Attribute name to save the log as.\"\"\"\n <|body_0|>\n\n def __call__(self, message, verbosity):\n \"\"\"Saves the message to our internal log string and writes the string out to the file.\"\"\"\n <|body_1|>\n\n def readOldLog(self):\n \"\"\"If the :file:`.nc` file we are logging to already has a log, read it in to the log we are about to make so that we append to it rather than overwriting it.\"\"\"\n <|body_2|>\n\n def write(self, filename=None, attribute=None):\n \"\"\"Save a copy of our log to the specified file and attribute.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.com = PISM.Context().com\n self.rank = PISM.Context().rank\n self.log = ''\n self.filename = filename\n self.attr = attribute\n<|end_body_0|>\n\n<|body_start_1|>\n if self.rank == 0 and verbosity <= 2:\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n self.log = '%s%s: %s' % (self.log, timestamp, message)\n d = PISM.netCDF.Dataset(self.filename, 'a')\n d.__setattr__(self.attr, self.log)\n d.close()\n self.com.barrier()\n<|end_body_1|>\n\n<|body_start_2|>\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(self.filename, 'a')\n if self.attr in d.ncattrs():\n self.log += d.__getattr__(self.attr)\n d.close()\n self.com.barrier()\n<|end_body_2|>\n\n<|body_start_3|>\n if filename is None:\n filename = self.filename\n if attribute is None:\n attribute = self.attr\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(filename, 'a')\n d.__setattr__(attribute, self.log)\n d.close()\n self.com.barrier()\n<|end_body_3|>\n", "revision_id": "88664f50a2f7075b6e96a06a5976986aac0302ed", "skeleton": "<|skeleton|>\nclass CaptureLogger:\n \"\"\"Implements a logger that appends log messages as they occur to an attribute of an :file:`.nc` file.\"\"\"\n\n def __init__(self, filename, attribute='pism_log'):\n \"\"\":param filename: Name of :file:`.nc` file to save the log to. :param attribute: Attribute name to save the log as.\"\"\"\n <|body_0|>\n\n def __call__(self, message, verbosity):\n \"\"\"Saves the message to our internal log string and writes the string out to the file.\"\"\"\n <|body_1|>\n\n def readOldLog(self):\n \"\"\"If the :file:`.nc` file we are logging to already has a log, read it in to the log we are about to make so that we append to it rather than overwriting it.\"\"\"\n <|body_2|>\n\n def write(self, filename=None, attribute=None):\n \"\"\"Save a copy of our log to the specified file and attribute.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CaptureLogger:\n \"\"\"Implements a logger that appends log messages as they occur to an attribute of an :file:`.nc` file.\"\"\"\n\n def __init__(self, filename, attribute='pism_log'):\n \"\"\":param filename: Name of :file:`.nc` file to save the log to. :param attribute: Attribute name to save the log as.\"\"\"\n self.com = PISM.Context().com\n self.rank = PISM.Context().rank\n self.log = ''\n self.filename = filename\n self.attr = attribute\n\n def __call__(self, message, verbosity):\n \"\"\"Saves the message to our internal log string and writes the string out to the file.\"\"\"\n if self.rank == 0 and verbosity <= 2:\n timestamp = time.strftime('%Y-%m-%d %H:%M:%S')\n self.log = '%s%s: %s' % (self.log, timestamp, message)\n d = PISM.netCDF.Dataset(self.filename, 'a')\n d.__setattr__(self.attr, self.log)\n d.close()\n self.com.barrier()\n\n def readOldLog(self):\n \"\"\"If the :file:`.nc` file we are logging to already has a log, read it in to the log we are about to make so that we append to it rather than overwriting it.\"\"\"\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(self.filename, 'a')\n if self.attr in d.ncattrs():\n self.log += d.__getattr__(self.attr)\n d.close()\n self.com.barrier()\n\n def write(self, filename=None, attribute=None):\n \"\"\"Save a copy of our log to the specified file and attribute.\"\"\"\n if filename is None:\n filename = self.filename\n if attribute is None:\n attribute = self.attr\n if PISM.Context().rank == 0:\n d = PISM.netCDF.Dataset(filename, 'a')\n d.__setattr__(attribute, self.log)\n d.close()\n self.com.barrier()\n", "source": "the_stack_v2_python_sparse", "source_path": "site-packages/PISM/logging.py", "source_repo": "flapo099/test", "split": "test", "star_events_count": 0} {"blob_id": "a0fbb22017bd1c29e7d85b80faa4efdd2459b554", "bodies": ["zappy = _target_eid\nmoved = False\nif zappy in _user._detected_entities:\n atz_x, atz_y = (_level_view.ent_coords(_target_eid)[i] - _user.get_coords()[i] for i in range(2))\n if math.sqrt(atz_x * atz_x + atz_y * atz_y) > 1:\n if atz_x < 0:\n moved = self._try_to_move(DIR.W, _level_view, _user)\n elif atz_x > 0:\n moved = self._try_to_move(DIR.E, _level_view, _user)\n if not moved and atz_y < 0:\n moved = self._try_to_move(DIR.S, _level_view, _user)\n elif not moved and atz_y > 0:\n moved = self._try_to_move(DIR.N, _level_view, _user)\nreturn moved", "adv_x, adv_y = adversary.get_coords()\ncoords_to_next = DIR.get_coords_in_direction_from(direction, adv_x, adv_y)\nif level.cell_is_passable(*coords_to_next):\n cmd_desc = '{0} has moved without foresight to ({1}, {2})'.format(level.ent_coords(adversary.eid), *coords_to_next)\n command = cmpd.CompoundCmd(cmd_desc, LevelMoveEntity(adversary.eid, level, adv_x, adv_y, *coords_to_next))\n level.add_command(command)\n return True\nelse:\n return False"], "bodies_text": "<|body_start_0|>\n zappy = _target_eid\n moved = False\n if zappy in _user._detected_entities:\n atz_x, atz_y = (_level_view.ent_coords(_target_eid)[i] - _user.get_coords()[i] for i in range(2))\n if math.sqrt(atz_x * atz_x + atz_y * atz_y) > 1:\n if atz_x < 0:\n moved = self._try_to_move(DIR.W, _level_view, _user)\n elif atz_x > 0:\n moved = self._try_to_move(DIR.E, _level_view, _user)\n if not moved and atz_y < 0:\n moved = self._try_to_move(DIR.S, _level_view, _user)\n elif not moved and atz_y > 0:\n moved = self._try_to_move(DIR.N, _level_view, _user)\n return moved\n<|end_body_0|>\n\n<|body_start_1|>\n adv_x, adv_y = adversary.get_coords()\n coords_to_next = DIR.get_coords_in_direction_from(direction, adv_x, adv_y)\n if level.cell_is_passable(*coords_to_next):\n cmd_desc = '{0} has moved without foresight to ({1}, {2})'.format(level.ent_coords(adversary.eid), *coords_to_next)\n command = cmpd.CompoundCmd(cmd_desc, LevelMoveEntity(adversary.eid, level, adv_x, adv_y, *coords_to_next))\n level.add_command(command)\n return True\n else:\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BehaviorMoveStupid", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BehaviorMoveStupid:\n\n def _execute_effects(self, _target_eid, _level_view, _user):\n \"\"\":type _target_eid: int :type _level_view: level.LevelView.LevelView :type _user: entity.actor.Adversary.Adversary :rtype: bool\"\"\"\n <|body_0|>\n\n def _try_to_move(self, direction, level, adversary):\n \"\"\":type direction: int :type level: level.LevelView.LevelView :type adversary: entity.actor.Adversary.Adversary\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n zappy = _target_eid\n moved = False\n if zappy in _user._detected_entities:\n atz_x, atz_y = (_level_view.ent_coords(_target_eid)[i] - _user.get_coords()[i] for i in range(2))\n if math.sqrt(atz_x * atz_x + atz_y * atz_y) > 1:\n if atz_x < 0:\n moved = self._try_to_move(DIR.W, _level_view, _user)\n elif atz_x > 0:\n moved = self._try_to_move(DIR.E, _level_view, _user)\n if not moved and atz_y < 0:\n moved = self._try_to_move(DIR.S, _level_view, _user)\n elif not moved and atz_y > 0:\n moved = self._try_to_move(DIR.N, _level_view, _user)\n return moved\n<|end_body_0|>\n\n<|body_start_1|>\n adv_x, adv_y = adversary.get_coords()\n coords_to_next = DIR.get_coords_in_direction_from(direction, adv_x, adv_y)\n if level.cell_is_passable(*coords_to_next):\n cmd_desc = '{0} has moved without foresight to ({1}, {2})'.format(level.ent_coords(adversary.eid), *coords_to_next)\n command = cmpd.CompoundCmd(cmd_desc, LevelMoveEntity(adversary.eid, level, adv_x, adv_y, *coords_to_next))\n level.add_command(command)\n return True\n else:\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000194", "length_bytes": 2433, "license_type": "no_license", "methods": [{"docstring": ":type _target_eid: int :type _level_view: level.LevelView.LevelView :type _user: entity.actor.Adversary.Adversary :rtype: bool", "name": "_execute_effects", "signature": "def _execute_effects(self, _target_eid, _level_view, _user)"}, {"docstring": ":type direction: int :type level: level.LevelView.LevelView :type adversary: entity.actor.Adversary.Adversary", "name": "_try_to_move", "signature": "def _try_to_move(self, direction, level, adversary)"}], "n_methods": 2, "prompt": "Implement the Python class `BehaviorMoveStupid` described below.\n\nClass description:\nImplement the BehaviorMoveStupid class.\n\nMethod signatures and docstrings:\n- def _execute_effects(self, _target_eid, _level_view, _user): :type _target_eid: int :type _level_view: level.LevelView.LevelView :type _user: entity.actor.Adversary.Adversary :rtype: bool\n- def _try_to_move(self, direction, level, adversary): :type direction: int :type level: level.LevelView.LevelView :type adversary: entity.actor.Adversary.Adversary", "prompted_full_text": "Implement the Python class `BehaviorMoveStupid` described below.\n\nClass description:\nImplement the BehaviorMoveStupid class.\n\nMethod signatures and docstrings:\n- def _execute_effects(self, _target_eid, _level_view, _user): :type _target_eid: int :type _level_view: level.LevelView.LevelView :type _user: entity.actor.Adversary.Adversary :rtype: bool\n- def _try_to_move(self, direction, level, adversary): :type direction: int :type level: level.LevelView.LevelView :type adversary: entity.actor.Adversary.Adversary\n\n<|skeleton|>\nclass BehaviorMoveStupid:\n\n def _execute_effects(self, _target_eid, _level_view, _user):\n \"\"\":type _target_eid: int :type _level_view: level.LevelView.LevelView :type _user: entity.actor.Adversary.Adversary :rtype: bool\"\"\"\n <|body_0|>\n\n def _try_to_move(self, direction, level, adversary):\n \"\"\":type direction: int :type level: level.LevelView.LevelView :type adversary: entity.actor.Adversary.Adversary\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n zappy = _target_eid\n moved = False\n if zappy in _user._detected_entities:\n atz_x, atz_y = (_level_view.ent_coords(_target_eid)[i] - _user.get_coords()[i] for i in range(2))\n if math.sqrt(atz_x * atz_x + atz_y * atz_y) > 1:\n if atz_x < 0:\n moved = self._try_to_move(DIR.W, _level_view, _user)\n elif atz_x > 0:\n moved = self._try_to_move(DIR.E, _level_view, _user)\n if not moved and atz_y < 0:\n moved = self._try_to_move(DIR.S, _level_view, _user)\n elif not moved and atz_y > 0:\n moved = self._try_to_move(DIR.N, _level_view, _user)\n return moved\n<|end_body_0|>\n\n<|body_start_1|>\n adv_x, adv_y = adversary.get_coords()\n coords_to_next = DIR.get_coords_in_direction_from(direction, adv_x, adv_y)\n if level.cell_is_passable(*coords_to_next):\n cmd_desc = '{0} has moved without foresight to ({1}, {2})'.format(level.ent_coords(adversary.eid), *coords_to_next)\n command = cmpd.CompoundCmd(cmd_desc, LevelMoveEntity(adversary.eid, level, adv_x, adv_y, *coords_to_next))\n level.add_command(command)\n return True\n else:\n return False\n<|end_body_1|>\n", "revision_id": "0342700b0edfeedd8e3a8c1fea9bd790d2b8a042", "skeleton": "<|skeleton|>\nclass BehaviorMoveStupid:\n\n def _execute_effects(self, _target_eid, _level_view, _user):\n \"\"\":type _target_eid: int :type _level_view: level.LevelView.LevelView :type _user: entity.actor.Adversary.Adversary :rtype: bool\"\"\"\n <|body_0|>\n\n def _try_to_move(self, direction, level, adversary):\n \"\"\":type direction: int :type level: level.LevelView.LevelView :type adversary: entity.actor.Adversary.Adversary\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BehaviorMoveStupid:\n def _execute_effects(self, _target_eid, _level_view, _user):\n \"\"\":type _target_eid: int :type _level_view: level.LevelView.LevelView :type _user: entity.actor.Adversary.Adversary :rtype: bool\"\"\"\n zappy = _target_eid\n moved = False\n if zappy in _user._detected_entities:\n atz_x, atz_y = (_level_view.ent_coords(_target_eid)[i] - _user.get_coords()[i] for i in range(2))\n if math.sqrt(atz_x * atz_x + atz_y * atz_y) > 1:\n if atz_x < 0:\n moved = self._try_to_move(DIR.W, _level_view, _user)\n elif atz_x > 0:\n moved = self._try_to_move(DIR.E, _level_view, _user)\n if not moved and atz_y < 0:\n moved = self._try_to_move(DIR.S, _level_view, _user)\n elif not moved and atz_y > 0:\n moved = self._try_to_move(DIR.N, _level_view, _user)\n return moved\n\n def _try_to_move(self, direction, level, adversary):\n \"\"\":type direction: int :type level: level.LevelView.LevelView :type adversary: entity.actor.Adversary.Adversary\"\"\"\n adv_x, adv_y = adversary.get_coords()\n coords_to_next = DIR.get_coords_in_direction_from(direction, adv_x, adv_y)\n if level.cell_is_passable(*coords_to_next):\n cmd_desc = '{0} has moved without foresight to ({1}, {2})'.format(level.ent_coords(adversary.eid), *coords_to_next)\n command = cmpd.CompoundCmd(cmd_desc, LevelMoveEntity(adversary.eid, level, adv_x, adv_y, *coords_to_next))\n level.add_command(command)\n return True\n else:\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "Python_Zappy/entity/actor/behaviors/BehaviorMoveStupid.py", "source_repo": "MoyTW/Zappy", "split": "test", "star_events_count": 0} {"blob_id": "3775f085a18f1884a6b0708f81feb8d15dc7bf7e", "bodies": ["if cls.ENGINE is not None:\n return cls.ENGINE\nelse:\n if user is None:\n user = cls.MYSQL_USER\n if password is None:\n password = cls.MYSQL_PASS\n if host is None:\n host = cls.MYSQL_HOST\n if dev is False:\n database = cls.MYSQL_DB\n else:\n database = 'mvi_dev'\n engine = sqlalchemy.create_engine('mysql://' + user + ':' + password + '@' + host + '/' + database, echo=echo)\n cls.ENGINE = engine\n return cls.ENGINE", "engine = cls.get_engine()\nSession = sessionmaker(bind=engine, autocommit=True, autoflush=False)\nsession = Session()\nreturn session"], "bodies_text": "<|body_start_0|>\n if cls.ENGINE is not None:\n return cls.ENGINE\n else:\n if user is None:\n user = cls.MYSQL_USER\n if password is None:\n password = cls.MYSQL_PASS\n if host is None:\n host = cls.MYSQL_HOST\n if dev is False:\n database = cls.MYSQL_DB\n else:\n database = 'mvi_dev'\n engine = sqlalchemy.create_engine('mysql://' + user + ':' + password + '@' + host + '/' + database, echo=echo)\n cls.ENGINE = engine\n return cls.ENGINE\n<|end_body_0|>\n\n<|body_start_1|>\n engine = cls.get_engine()\n Session = sessionmaker(bind=engine, autocommit=True, autoflush=False)\n session = Session()\n return session\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Connection", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Connection:\n\n def get_engine(cls, dev=False, user=None, password=None, host=None, echo=False):\n \"\"\"SQL connections, SQL execution and high-level DB-API interface. :param dev: :param user: :param password: :param host: :param echo: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. :return: SQLAlchemy `Engine` instance\"\"\"\n <|body_0|>\n\n def create_session(cls):\n \"\"\"create a session object based on engine :return : session\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if cls.ENGINE is not None:\n return cls.ENGINE\n else:\n if user is None:\n user = cls.MYSQL_USER\n if password is None:\n password = cls.MYSQL_PASS\n if host is None:\n host = cls.MYSQL_HOST\n if dev is False:\n database = cls.MYSQL_DB\n else:\n database = 'mvi_dev'\n engine = sqlalchemy.create_engine('mysql://' + user + ':' + password + '@' + host + '/' + database, echo=echo)\n cls.ENGINE = engine\n return cls.ENGINE\n<|end_body_0|>\n\n<|body_start_1|>\n engine = cls.get_engine()\n Session = sessionmaker(bind=engine, autocommit=True, autoflush=False)\n session = Session()\n return session\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000195", "length_bytes": 1766, "license_type": "no_license", "methods": [{"docstring": "SQL connections, SQL execution and high-level DB-API interface. :param dev: :param user: :param password: :param host: :param echo: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. :return: SQLAlchemy `Engine` instance", "name": "get_engine", "signature": "def get_engine(cls, dev=False, user=None, password=None, host=None, echo=False)"}, {"docstring": "create a session object based on engine :return : session", "name": "create_session", "signature": "def create_session(cls)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027938", "prompt": "Implement the Python class `Connection` described below.\n\nClass description:\nImplement the Connection class.\n\nMethod signatures and docstrings:\n- def get_engine(cls, dev=False, user=None, password=None, host=None, echo=False): SQL connections, SQL execution and high-level DB-API interface. :param dev: :param user: :param password: :param host: :param echo: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. :return: SQLAlchemy `Engine` instance\n- def create_session(cls): create a session object based on engine :return : session", "prompted_full_text": "Implement the Python class `Connection` described below.\n\nClass description:\nImplement the Connection class.\n\nMethod signatures and docstrings:\n- def get_engine(cls, dev=False, user=None, password=None, host=None, echo=False): SQL connections, SQL execution and high-level DB-API interface. :param dev: :param user: :param password: :param host: :param echo: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. :return: SQLAlchemy `Engine` instance\n- def create_session(cls): create a session object based on engine :return : session\n\n<|skeleton|>\nclass Connection:\n\n def get_engine(cls, dev=False, user=None, password=None, host=None, echo=False):\n \"\"\"SQL connections, SQL execution and high-level DB-API interface. :param dev: :param user: :param password: :param host: :param echo: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. :return: SQLAlchemy `Engine` instance\"\"\"\n <|body_0|>\n\n def create_session(cls):\n \"\"\"create a session object based on engine :return : session\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if cls.ENGINE is not None:\n return cls.ENGINE\n else:\n if user is None:\n user = cls.MYSQL_USER\n if password is None:\n password = cls.MYSQL_PASS\n if host is None:\n host = cls.MYSQL_HOST\n if dev is False:\n database = cls.MYSQL_DB\n else:\n database = 'mvi_dev'\n engine = sqlalchemy.create_engine('mysql://' + user + ':' + password + '@' + host + '/' + database, echo=echo)\n cls.ENGINE = engine\n return cls.ENGINE\n<|end_body_0|>\n\n<|body_start_1|>\n engine = cls.get_engine()\n Session = sessionmaker(bind=engine, autocommit=True, autoflush=False)\n session = Session()\n return session\n<|end_body_1|>\n", "revision_id": "91081877ca221089776acc9816dc907dcd5d2f73", "skeleton": "<|skeleton|>\nclass Connection:\n\n def get_engine(cls, dev=False, user=None, password=None, host=None, echo=False):\n \"\"\"SQL connections, SQL execution and high-level DB-API interface. :param dev: :param user: :param password: :param host: :param echo: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. :return: SQLAlchemy `Engine` instance\"\"\"\n <|body_0|>\n\n def create_session(cls):\n \"\"\"create a session object based on engine :return : session\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Connection:\n def get_engine(cls, dev=False, user=None, password=None, host=None, echo=False):\n \"\"\"SQL connections, SQL execution and high-level DB-API interface. :param dev: :param user: :param password: :param host: :param echo: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. :return: SQLAlchemy `Engine` instance\"\"\"\n if cls.ENGINE is not None:\n return cls.ENGINE\n else:\n if user is None:\n user = cls.MYSQL_USER\n if password is None:\n password = cls.MYSQL_PASS\n if host is None:\n host = cls.MYSQL_HOST\n if dev is False:\n database = cls.MYSQL_DB\n else:\n database = 'mvi_dev'\n engine = sqlalchemy.create_engine('mysql://' + user + ':' + password + '@' + host + '/' + database, echo=echo)\n cls.ENGINE = engine\n return cls.ENGINE\n\n def create_session(cls):\n \"\"\"create a session object based on engine :return : session\"\"\"\n engine = cls.get_engine()\n Session = sessionmaker(bind=engine, autocommit=True, autoflush=False)\n session = Session()\n return session\n", "source": "the_stack_v2_python_sparse", "source_path": "utils/database/connection.py", "source_repo": "armsky/MVI", "split": "test", "star_events_count": 0} {"blob_id": "c12468bf2c3975a6978835da5f663d33e1c8ff23", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n", "class_docstring": "Missing associated documentation comment in .proto file.", "class_name": "ODataStorageServiceServicer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ODataStorageServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def listODataStorage(self, request, context):\n \"\"\"Storage\"\"\"\n <|body_0|>\n\n def getODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def createODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def updateODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n def deleteODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000196", "length_bytes": 9910, "license_type": "permissive", "methods": [{"docstring": "Storage", "name": "listODataStorage", "signature": "def listODataStorage(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "getODataStorage", "signature": "def getODataStorage(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "createODataStorage", "signature": "def createODataStorage(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "updateODataStorage", "signature": "def updateODataStorage(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "deleteODataStorage", "signature": "def deleteODataStorage(self, request, context)"}], "n_methods": 5, "prompt": "Implement the Python class `ODataStorageServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def listODataStorage(self, request, context): Storage\n- def getODataStorage(self, request, context): Missing associated documentation comment in .proto file.\n- def createODataStorage(self, request, context): Missing associated documentation comment in .proto file.\n- def updateODataStorage(self, request, context): Missing associated documentation comment in .proto file.\n- def deleteODataStorage(self, request, context): Missing associated documentation comment in .proto file.", "prompted_full_text": "Implement the Python class `ODataStorageServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def listODataStorage(self, request, context): Storage\n- def getODataStorage(self, request, context): Missing associated documentation comment in .proto file.\n- def createODataStorage(self, request, context): Missing associated documentation comment in .proto file.\n- def updateODataStorage(self, request, context): Missing associated documentation comment in .proto file.\n- def deleteODataStorage(self, request, context): Missing associated documentation comment in .proto file.\n\n<|skeleton|>\nclass ODataStorageServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def listODataStorage(self, request, context):\n \"\"\"Storage\"\"\"\n <|body_0|>\n\n def getODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def createODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def updateODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n def deleteODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n", "revision_id": "c69e14b409add099d151434b9add711e41f41b20", "skeleton": "<|skeleton|>\nclass ODataStorageServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def listODataStorage(self, request, context):\n \"\"\"Storage\"\"\"\n <|body_0|>\n\n def getODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def createODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def updateODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n def deleteODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ODataStorageServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def listODataStorage(self, request, context):\n \"\"\"Storage\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def getODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def createODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def updateODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def deleteODataStorage(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "python-sdk/src/airavata_mft_sdk/odata/ODataStorageService_pb2_grpc.py", "source_repo": "apache/airavata-mft", "split": "test", "star_events_count": 23} {"blob_id": "622c20658f5afefc7a9176e2641575443538c387", "bodies": ["super().__init__(host, password)\nself._suffix = '/ # '\nself._aqara_property = True", "self.write(b'\\n')\nself.read_until(b'login: ', timeout=1)\npassword = self._password\nif self._password is None:\n password = '\\n'\ncommand = 'root'\nself.write(command.encode() + b'\\n')\nif password:\n self.read_until(b'Password: ', timeout=1)\n self.write(password.encode() + b'\\n')\ncommand = 'stty -echo'\nself.write(command.encode() + b'\\n')\ncommand = 'cd /'\nself.write(command.encode() + b'\\n')\nself.read_until(b'/ # ', timeout=10)\nreturn True"], "bodies_text": "<|body_start_0|>\n super().__init__(host, password)\n self._suffix = '/ # '\n self._aqara_property = True\n<|end_body_0|>\n\n<|body_start_1|>\n self.write(b'\\n')\n self.read_until(b'login: ', timeout=1)\n password = self._password\n if self._password is None:\n password = '\\n'\n command = 'root'\n self.write(command.encode() + b'\\n')\n if password:\n self.read_until(b'Password: ', timeout=1)\n self.write(password.encode() + b'\\n')\n command = 'stty -echo'\n self.write(command.encode() + b'\\n')\n command = 'cd /'\n self.write(command.encode() + b'\\n')\n self.read_until(b'/ # ', timeout=10)\n return True\n<|end_body_1|>\n", "class_docstring": "Telnet Shell", "class_name": "TelnetShellG3", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TelnetShellG3:\n \"\"\"Telnet Shell\"\"\"\n\n def __init__(self, host: str, password=None):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def login(self):\n \"\"\"login function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(host, password)\n self._suffix = '/ # '\n self._aqara_property = True\n<|end_body_0|>\n\n<|body_start_1|>\n self.write(b'\\n')\n self.read_until(b'login: ', timeout=1)\n password = self._password\n if self._password is None:\n password = '\\n'\n command = 'root'\n self.write(command.encode() + b'\\n')\n if password:\n self.read_until(b'Password: ', timeout=1)\n self.write(password.encode() + b'\\n')\n command = 'stty -echo'\n self.write(command.encode() + b'\\n')\n command = 'cd /'\n self.write(command.encode() + b'\\n')\n self.read_until(b'/ # ', timeout=10)\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000197", "length_bytes": 4540, "license_type": "no_license", "methods": [{"docstring": "init", "name": "__init__", "signature": "def __init__(self, host: str, password=None)"}, {"docstring": "login function", "name": "login", "signature": "def login(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009602", "prompt": "Implement the Python class `TelnetShellG3` described below.\n\nClass description:\nTelnet Shell\n\nMethod signatures and docstrings:\n- def __init__(self, host: str, password=None): init\n- def login(self): login function", "prompted_full_text": "Implement the Python class `TelnetShellG3` described below.\n\nClass description:\nTelnet Shell\n\nMethod signatures and docstrings:\n- def __init__(self, host: str, password=None): init\n- def login(self): login function\n\n<|skeleton|>\nclass TelnetShellG3:\n \"\"\"Telnet Shell\"\"\"\n\n def __init__(self, host: str, password=None):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def login(self):\n \"\"\"login function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(host, password)\n self._suffix = '/ # '\n self._aqara_property = True\n<|end_body_0|>\n\n<|body_start_1|>\n self.write(b'\\n')\n self.read_until(b'login: ', timeout=1)\n password = self._password\n if self._password is None:\n password = '\\n'\n command = 'root'\n self.write(command.encode() + b'\\n')\n if password:\n self.read_until(b'Password: ', timeout=1)\n self.write(password.encode() + b'\\n')\n command = 'stty -echo'\n self.write(command.encode() + b'\\n')\n command = 'cd /'\n self.write(command.encode() + b'\\n')\n self.read_until(b'/ # ', timeout=10)\n return True\n<|end_body_1|>\n", "revision_id": "3988d204908478996fffa433faffa9ea20f42562", "skeleton": "<|skeleton|>\nclass TelnetShellG3:\n \"\"\"Telnet Shell\"\"\"\n\n def __init__(self, host: str, password=None):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def login(self):\n \"\"\"login function\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TelnetShellG3:\n \"\"\"Telnet Shell\"\"\"\n\n def __init__(self, host: str, password=None):\n \"\"\"init\"\"\"\n super().__init__(host, password)\n self._suffix = '/ # '\n self._aqara_property = True\n\n def login(self):\n \"\"\"login function\"\"\"\n self.write(b'\\n')\n self.read_until(b'login: ', timeout=1)\n password = self._password\n if self._password is None:\n password = '\\n'\n command = 'root'\n self.write(command.encode() + b'\\n')\n if password:\n self.read_until(b'Password: ', timeout=1)\n self.write(password.encode() + b'\\n')\n command = 'stty -echo'\n self.write(command.encode() + b'\\n')\n command = 'cd /'\n self.write(command.encode() + b'\\n')\n self.read_until(b'/ # ', timeout=10)\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "certificates/custom_components/aqara_camera/core/shell.py", "source_repo": "hellad/hass-config", "split": "test", "star_events_count": 3} {"blob_id": "46f4a667232a58cb1d8e7d4b8459a00c9f47b7ce", "bodies": ["coeffs = [_Element(rng(16)) for i in range(k - 1)]\ncoeffs.append(_Element(secret))\n\ndef make_share(user, coeffs, ssss):\n idx = _Element(user)\n share = _Element(0)\n for coeff in coeffs:\n share = idx * share + coeff\n if ssss:\n share += _Element(user) ** len(coeffs)\n return share.encode()\nreturn [(i, make_share(i, coeffs, ssss)) for i in range(1, n + 1)]", "k = len(shares)\ngf_shares = []\nfor x in shares:\n idx = _Element(x[0])\n value = _Element(x[1])\n if any((y[0] == idx for y in gf_shares)):\n raise ValueError('Duplicate share')\n if ssss:\n value += idx ** k\n gf_shares.append((idx, value))\nresult = _Element(0)\nfor j in range(k):\n x_j, y_j = gf_shares[j]\n numerator = _Element(1)\n denominator = _Element(1)\n for m in range(k):\n x_m = gf_shares[m][0]\n if m != j:\n numerator *= x_m\n denominator *= x_j + x_m\n result += y_j * numerator * denominator.inverse()\nreturn result.encode()"], "bodies_text": "<|body_start_0|>\n coeffs = [_Element(rng(16)) for i in range(k - 1)]\n coeffs.append(_Element(secret))\n\n def make_share(user, coeffs, ssss):\n idx = _Element(user)\n share = _Element(0)\n for coeff in coeffs:\n share = idx * share + coeff\n if ssss:\n share += _Element(user) ** len(coeffs)\n return share.encode()\n return [(i, make_share(i, coeffs, ssss)) for i in range(1, n + 1)]\n<|end_body_0|>\n\n<|body_start_1|>\n k = len(shares)\n gf_shares = []\n for x in shares:\n idx = _Element(x[0])\n value = _Element(x[1])\n if any((y[0] == idx for y in gf_shares)):\n raise ValueError('Duplicate share')\n if ssss:\n value += idx ** k\n gf_shares.append((idx, value))\n result = _Element(0)\n for j in range(k):\n x_j, y_j = gf_shares[j]\n numerator = _Element(1)\n denominator = _Element(1)\n for m in range(k):\n x_m = gf_shares[m][0]\n if m != j:\n numerator *= x_m\n denominator *= x_j + x_m\n result += y_j * numerator * denominator.inverse()\n return result.encode()\n<|end_body_1|>\n", "class_docstring": "Shamir's secret sharing scheme. A secret is split into ``n`` shares, and it is sufficient to collect ``k`` of them to reconstruct the secret.", "class_name": "Shamir", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Shamir:\n \"\"\"Shamir's secret sharing scheme. A secret is split into ``n`` shares, and it is sufficient to collect ``k`` of them to reconstruct the secret.\"\"\"\n\n def split(k, n, secret, ssss=False):\n \"\"\"Split a secret into ``n`` shares. The secret can be reconstructed later using just ``k`` shares out of the original ``n``. Each share must be kept confidential to the person it was assigned to. Each share is associated to an index (starting from 1). Args: k (integer): The sufficient number of shares to reconstruct the secret (``k < n``). n (integer): The number of shares that this method will create. secret (byte string): A byte string of 16 bytes (e.g. the AES 128 key). ssss (bool): If ``True``, the shares can be used with the ``ssss`` utility. Default: ``False``. Return (tuples): ``n`` tuples. A tuple is meant for each participant and it contains two items: 1. the unique index (an integer)\"\"\"\n <|body_0|>\n\n def combine(shares, ssss=False):\n \"\"\"Recombine a secret, if enough shares are presented. Args: shares (tuples): The *k* tuples, each containin the index (an integer) and the share (a byte string, 16 bytes long) that were assigned to a participant. ssss (bool): If ``True``, the shares were produced by the ``ssss`` utility. Default: ``False``. Return: The original secret, as a byte string (16 bytes long).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n coeffs = [_Element(rng(16)) for i in range(k - 1)]\n coeffs.append(_Element(secret))\n\n def make_share(user, coeffs, ssss):\n idx = _Element(user)\n share = _Element(0)\n for coeff in coeffs:\n share = idx * share + coeff\n if ssss:\n share += _Element(user) ** len(coeffs)\n return share.encode()\n return [(i, make_share(i, coeffs, ssss)) for i in range(1, n + 1)]\n<|end_body_0|>\n\n<|body_start_1|>\n k = len(shares)\n gf_shares = []\n for x in shares:\n idx = _Element(x[0])\n value = _Element(x[1])\n if any((y[0] == idx for y in gf_shares)):\n raise ValueError('Duplicate share')\n if ssss:\n value += idx ** k\n gf_shares.append((idx, value))\n result = _Element(0)\n for j in range(k):\n x_j, y_j = gf_shares[j]\n numerator = _Element(1)\n denominator = _Element(1)\n for m in range(k):\n x_m = gf_shares[m][0]\n if m != j:\n numerator *= x_m\n denominator *= x_j + x_m\n result += y_j * numerator * denominator.inverse()\n return result.encode()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000198", "length_bytes": 8778, "license_type": "permissive", "methods": [{"docstring": "Split a secret into ``n`` shares. The secret can be reconstructed later using just ``k`` shares out of the original ``n``. Each share must be kept confidential to the person it was assigned to. Each share is associated to an index (starting from 1). Args: k (integer): The sufficient number of shares to reconstruct the secret (``k < n``). n (integer): The number of shares that this method will create. secret (byte string): A byte string of 16 bytes (e.g. the AES 128 key). ssss (bool): If ``True``, the shares can be used with the ``ssss`` utility. Default: ``False``. Return (tuples): ``n`` tuples. A tuple is meant for each participant and it contains two items: 1. the unique index (an integer)", "name": "split", "signature": "def split(k, n, secret, ssss=False)"}, {"docstring": "Recombine a secret, if enough shares are presented. Args: shares (tuples): The *k* tuples, each containin the index (an integer) and the share (a byte string, 16 bytes long) that were assigned to a participant. ssss (bool): If ``True``, the shares were produced by the ``ssss`` utility. Default: ``False``. Return: The original secret, as a byte string (16 bytes long).", "name": "combine", "signature": "def combine(shares, ssss=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013104", "prompt": "Implement the Python class `Shamir` described below.\n\nClass description:\nShamir's secret sharing scheme. A secret is split into ``n`` shares, and it is sufficient to collect ``k`` of them to reconstruct the secret.\n\nMethod signatures and docstrings:\n- def split(k, n, secret, ssss=False): Split a secret into ``n`` shares. The secret can be reconstructed later using just ``k`` shares out of the original ``n``. Each share must be kept confidential to the person it was assigned to. Each share is associated to an index (starting from 1). Args: k (integer): The sufficient number of shares to reconstruct the secret (``k < n``). n (integer): The number of shares that this method will create. secret (byte string): A byte string of 16 bytes (e.g. the AES 128 key). ssss (bool): If ``True``, the shares can be used with the ``ssss`` utility. Default: ``False``. Return (tuples): ``n`` tuples. A tuple is meant for each participant and it contains two items: 1. the unique index (an integer)\n- def combine(shares, ssss=False): Recombine a secret, if enough shares are presented. Args: shares (tuples): The *k* tuples, each containin the index (an integer) and the share (a byte string, 16 bytes long) that were assigned to a participant. ssss (bool): If ``True``, the shares were produced by the ``ssss`` utility. Default: ``False``. Return: The original secret, as a byte string (16 bytes long).", "prompted_full_text": "Implement the Python class `Shamir` described below.\n\nClass description:\nShamir's secret sharing scheme. A secret is split into ``n`` shares, and it is sufficient to collect ``k`` of them to reconstruct the secret.\n\nMethod signatures and docstrings:\n- def split(k, n, secret, ssss=False): Split a secret into ``n`` shares. The secret can be reconstructed later using just ``k`` shares out of the original ``n``. Each share must be kept confidential to the person it was assigned to. Each share is associated to an index (starting from 1). Args: k (integer): The sufficient number of shares to reconstruct the secret (``k < n``). n (integer): The number of shares that this method will create. secret (byte string): A byte string of 16 bytes (e.g. the AES 128 key). ssss (bool): If ``True``, the shares can be used with the ``ssss`` utility. Default: ``False``. Return (tuples): ``n`` tuples. A tuple is meant for each participant and it contains two items: 1. the unique index (an integer)\n- def combine(shares, ssss=False): Recombine a secret, if enough shares are presented. Args: shares (tuples): The *k* tuples, each containin the index (an integer) and the share (a byte string, 16 bytes long) that were assigned to a participant. ssss (bool): If ``True``, the shares were produced by the ``ssss`` utility. Default: ``False``. Return: The original secret, as a byte string (16 bytes long).\n\n<|skeleton|>\nclass Shamir:\n \"\"\"Shamir's secret sharing scheme. A secret is split into ``n`` shares, and it is sufficient to collect ``k`` of them to reconstruct the secret.\"\"\"\n\n def split(k, n, secret, ssss=False):\n \"\"\"Split a secret into ``n`` shares. The secret can be reconstructed later using just ``k`` shares out of the original ``n``. Each share must be kept confidential to the person it was assigned to. Each share is associated to an index (starting from 1). Args: k (integer): The sufficient number of shares to reconstruct the secret (``k < n``). n (integer): The number of shares that this method will create. secret (byte string): A byte string of 16 bytes (e.g. the AES 128 key). ssss (bool): If ``True``, the shares can be used with the ``ssss`` utility. Default: ``False``. Return (tuples): ``n`` tuples. A tuple is meant for each participant and it contains two items: 1. the unique index (an integer)\"\"\"\n <|body_0|>\n\n def combine(shares, ssss=False):\n \"\"\"Recombine a secret, if enough shares are presented. Args: shares (tuples): The *k* tuples, each containin the index (an integer) and the share (a byte string, 16 bytes long) that were assigned to a participant. ssss (bool): If ``True``, the shares were produced by the ``ssss`` utility. Default: ``False``. Return: The original secret, as a byte string (16 bytes long).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n coeffs = [_Element(rng(16)) for i in range(k - 1)]\n coeffs.append(_Element(secret))\n\n def make_share(user, coeffs, ssss):\n idx = _Element(user)\n share = _Element(0)\n for coeff in coeffs:\n share = idx * share + coeff\n if ssss:\n share += _Element(user) ** len(coeffs)\n return share.encode()\n return [(i, make_share(i, coeffs, ssss)) for i in range(1, n + 1)]\n<|end_body_0|>\n\n<|body_start_1|>\n k = len(shares)\n gf_shares = []\n for x in shares:\n idx = _Element(x[0])\n value = _Element(x[1])\n if any((y[0] == idx for y in gf_shares)):\n raise ValueError('Duplicate share')\n if ssss:\n value += idx ** k\n gf_shares.append((idx, value))\n result = _Element(0)\n for j in range(k):\n x_j, y_j = gf_shares[j]\n numerator = _Element(1)\n denominator = _Element(1)\n for m in range(k):\n x_m = gf_shares[m][0]\n if m != j:\n numerator *= x_m\n denominator *= x_j + x_m\n result += y_j * numerator * denominator.inverse()\n return result.encode()\n<|end_body_1|>\n", "revision_id": "fa82044a2dc2f0f1f7454f5394e6d68fa923c289", "skeleton": "<|skeleton|>\nclass Shamir:\n \"\"\"Shamir's secret sharing scheme. A secret is split into ``n`` shares, and it is sufficient to collect ``k`` of them to reconstruct the secret.\"\"\"\n\n def split(k, n, secret, ssss=False):\n \"\"\"Split a secret into ``n`` shares. The secret can be reconstructed later using just ``k`` shares out of the original ``n``. Each share must be kept confidential to the person it was assigned to. Each share is associated to an index (starting from 1). Args: k (integer): The sufficient number of shares to reconstruct the secret (``k < n``). n (integer): The number of shares that this method will create. secret (byte string): A byte string of 16 bytes (e.g. the AES 128 key). ssss (bool): If ``True``, the shares can be used with the ``ssss`` utility. Default: ``False``. Return (tuples): ``n`` tuples. A tuple is meant for each participant and it contains two items: 1. the unique index (an integer)\"\"\"\n <|body_0|>\n\n def combine(shares, ssss=False):\n \"\"\"Recombine a secret, if enough shares are presented. Args: shares (tuples): The *k* tuples, each containin the index (an integer) and the share (a byte string, 16 bytes long) that were assigned to a participant. ssss (bool): If ``True``, the shares were produced by the ``ssss`` utility. Default: ``False``. Return: The original secret, as a byte string (16 bytes long).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Shamir:\n \"\"\"Shamir's secret sharing scheme. A secret is split into ``n`` shares, and it is sufficient to collect ``k`` of them to reconstruct the secret.\"\"\"\n\n def split(k, n, secret, ssss=False):\n \"\"\"Split a secret into ``n`` shares. The secret can be reconstructed later using just ``k`` shares out of the original ``n``. Each share must be kept confidential to the person it was assigned to. Each share is associated to an index (starting from 1). Args: k (integer): The sufficient number of shares to reconstruct the secret (``k < n``). n (integer): The number of shares that this method will create. secret (byte string): A byte string of 16 bytes (e.g. the AES 128 key). ssss (bool): If ``True``, the shares can be used with the ``ssss`` utility. Default: ``False``. Return (tuples): ``n`` tuples. A tuple is meant for each participant and it contains two items: 1. the unique index (an integer)\"\"\"\n coeffs = [_Element(rng(16)) for i in range(k - 1)]\n coeffs.append(_Element(secret))\n\n def make_share(user, coeffs, ssss):\n idx = _Element(user)\n share = _Element(0)\n for coeff in coeffs:\n share = idx * share + coeff\n if ssss:\n share += _Element(user) ** len(coeffs)\n return share.encode()\n return [(i, make_share(i, coeffs, ssss)) for i in range(1, n + 1)]\n\n def combine(shares, ssss=False):\n \"\"\"Recombine a secret, if enough shares are presented. Args: shares (tuples): The *k* tuples, each containin the index (an integer) and the share (a byte string, 16 bytes long) that were assigned to a participant. ssss (bool): If ``True``, the shares were produced by the ``ssss`` utility. Default: ``False``. Return: The original secret, as a byte string (16 bytes long).\"\"\"\n k = len(shares)\n gf_shares = []\n for x in shares:\n idx = _Element(x[0])\n value = _Element(x[1])\n if any((y[0] == idx for y in gf_shares)):\n raise ValueError('Duplicate share')\n if ssss:\n value += idx ** k\n gf_shares.append((idx, value))\n result = _Element(0)\n for j in range(k):\n x_j, y_j = gf_shares[j]\n numerator = _Element(1)\n denominator = _Element(1)\n for m in range(k):\n x_m = gf_shares[m][0]\n if m != j:\n numerator *= x_m\n denominator *= x_j + x_m\n result += y_j * numerator * denominator.inverse()\n return result.encode()\n", "source": "the_stack_v2_python_sparse", "source_path": "venv/lib/python3.6/site-packages/Crypto/Protocol/SecretSharing.py", "source_repo": "masora1030/eigoyurusan", "split": "test", "star_events_count": 11} {"blob_id": "66902fe3d6b118f31a747bea257ced59774d73f0", "bodies": ["club = self.club_repository.find_by_id(club_id)\nclub.add_student(student_id)\nself.club_repository.save(club)", "club = self.club_repository.find_by_id(club_id)\nclub.approve()\nself.club_repository.save(club)", "club = self.club_repository.find_by_id(club_id)\nclub.quit(student_id)\nself.club_repository.save(club)"], "bodies_text": "<|body_start_0|>\n club = self.club_repository.find_by_id(club_id)\n club.add_student(student_id)\n self.club_repository.save(club)\n<|end_body_0|>\n\n<|body_start_1|>\n club = self.club_repository.find_by_id(club_id)\n club.approve()\n self.club_repository.save(club)\n<|end_body_1|>\n\n<|body_start_2|>\n club = self.club_repository.find_by_id(club_id)\n club.quit(student_id)\n self.club_repository.save(club)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ClubUseCase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ClubUseCase:\n\n def add_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"クラブに生徒を登録します\"\"\"\n <|body_0|>\n\n def approve_club(self, club_id: ClubId) -> None:\n \"\"\"条件を満たせば、クラブを承認します\"\"\"\n <|body_1|>\n\n def quit_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"生徒を退部させます\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n club = self.club_repository.find_by_id(club_id)\n club.add_student(student_id)\n self.club_repository.save(club)\n<|end_body_0|>\n\n<|body_start_1|>\n club = self.club_repository.find_by_id(club_id)\n club.approve()\n self.club_repository.save(club)\n<|end_body_1|>\n\n<|body_start_2|>\n club = self.club_repository.find_by_id(club_id)\n club.quit(student_id)\n self.club_repository.save(club)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000199", "length_bytes": 1052, "license_type": "no_license", "methods": [{"docstring": "クラブに生徒を登録します", "name": "add_student", "signature": "def add_student(self, club_id: ClubId, student_id: StudentId) -> None"}, {"docstring": "条件を満たせば、クラブを承認します", "name": "approve_club", "signature": "def approve_club(self, club_id: ClubId) -> None"}, {"docstring": "生徒を退部させます", "name": "quit_student", "signature": "def quit_student(self, club_id: ClubId, student_id: StudentId) -> None"}], "n_methods": 3, "prompt": "Implement the Python class `ClubUseCase` described below.\n\nClass description:\nImplement the ClubUseCase class.\n\nMethod signatures and docstrings:\n- def add_student(self, club_id: ClubId, student_id: StudentId) -> None: クラブに生徒を登録します\n- def approve_club(self, club_id: ClubId) -> None: 条件を満たせば、クラブを承認します\n- def quit_student(self, club_id: ClubId, student_id: StudentId) -> None: 生徒を退部させます", "prompted_full_text": "Implement the Python class `ClubUseCase` described below.\n\nClass description:\nImplement the ClubUseCase class.\n\nMethod signatures and docstrings:\n- def add_student(self, club_id: ClubId, student_id: StudentId) -> None: クラブに生徒を登録します\n- def approve_club(self, club_id: ClubId) -> None: 条件を満たせば、クラブを承認します\n- def quit_student(self, club_id: ClubId, student_id: StudentId) -> None: 生徒を退部させます\n\n<|skeleton|>\nclass ClubUseCase:\n\n def add_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"クラブに生徒を登録します\"\"\"\n <|body_0|>\n\n def approve_club(self, club_id: ClubId) -> None:\n \"\"\"条件を満たせば、クラブを承認します\"\"\"\n <|body_1|>\n\n def quit_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"生徒を退部させます\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n club = self.club_repository.find_by_id(club_id)\n club.add_student(student_id)\n self.club_repository.save(club)\n<|end_body_0|>\n\n<|body_start_1|>\n club = self.club_repository.find_by_id(club_id)\n club.approve()\n self.club_repository.save(club)\n<|end_body_1|>\n\n<|body_start_2|>\n club = self.club_repository.find_by_id(club_id)\n club.quit(student_id)\n self.club_repository.save(club)\n<|end_body_2|>\n", "revision_id": "30ccb53752f28c7f0302f2d3213a6865735aea5e", "skeleton": "<|skeleton|>\nclass ClubUseCase:\n\n def add_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"クラブに生徒を登録します\"\"\"\n <|body_0|>\n\n def approve_club(self, club_id: ClubId) -> None:\n \"\"\"条件を満たせば、クラブを承認します\"\"\"\n <|body_1|>\n\n def quit_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"生徒を退部させます\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ClubUseCase:\n def add_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"クラブに生徒を登録します\"\"\"\n club = self.club_repository.find_by_id(club_id)\n club.add_student(student_id)\n self.club_repository.save(club)\n\n def approve_club(self, club_id: ClubId) -> None:\n \"\"\"条件を満たせば、クラブを承認します\"\"\"\n club = self.club_repository.find_by_id(club_id)\n club.approve()\n self.club_repository.save(club)\n\n def quit_student(self, club_id: ClubId, student_id: StudentId) -> None:\n \"\"\"生徒を退部させます\"\"\"\n club = self.club_repository.find_by_id(club_id)\n club.quit(student_id)\n self.club_repository.save(club)\n", "source": "the_stack_v2_python_sparse", "source_path": "aggregate_club/src/usecase/club_usecase.py", "source_repo": "mohira/ddd-examples-python", "split": "test", "star_events_count": 2} {"blob_id": "fe6ffdfdcd95bc4f3eb8154aa51fac8cbf0eb985", "bodies": ["data = request.data\nif InterfaceMange.objects.filter(interfaceName=data['interfaceName']):\n return Response({'code': 1001, 'data': '接口已存在'})\ntry:\n serializer = InterfaceMangeListSer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '新建接口成功'})\n else:\n return Response({'code': 1001, 'data': '参数有误'})\nexcept Exception as e:\n return Response({'code': 1001, 'data': str(e)})", "data = request.data\nobj = InterfaceMange.objects.filter(id=data['id']).first()\nif len(data) == 2:\n parent = InterfaceMange.objects.filter(id=data['nodeId']).first().parent.id\n data = {'parent': parent}\n serializer = UpdateTreeNodeSer(obj, data=data)\nelse:\n serializer = InterfaceMangeListSer(obj, data=data)\nif serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '编辑接口成功'})\nelse:\n return Response({'code': 1001, 'data': '编辑接口失败'})", "id = request.GET.get('id')\nobj = InterfaceMange.objects.filter(id=id)\ndata = InterfaceMangeListSer(obj, many=True).data\nreturn Response(data[0])", "id = request.GET.get('id', '')\ntry:\n obj = InterfaceMange.objects.filter(id=id)\n obj.delete()\n return Response({'code': 1000, 'data': '删除接口成功'})\nexcept Exception as e:\n return Response({'code': 1001, 'data': str(e)})"], "bodies_text": "<|body_start_0|>\n data = request.data\n if InterfaceMange.objects.filter(interfaceName=data['interfaceName']):\n return Response({'code': 1001, 'data': '接口已存在'})\n try:\n serializer = InterfaceMangeListSer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '新建接口成功'})\n else:\n return Response({'code': 1001, 'data': '参数有误'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n obj = InterfaceMange.objects.filter(id=data['id']).first()\n if len(data) == 2:\n parent = InterfaceMange.objects.filter(id=data['nodeId']).first().parent.id\n data = {'parent': parent}\n serializer = UpdateTreeNodeSer(obj, data=data)\n else:\n serializer = InterfaceMangeListSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '编辑接口成功'})\n else:\n return Response({'code': 1001, 'data': '编辑接口失败'})\n<|end_body_1|>\n\n<|body_start_2|>\n id = request.GET.get('id')\n obj = InterfaceMange.objects.filter(id=id)\n data = InterfaceMangeListSer(obj, many=True).data\n return Response(data[0])\n<|end_body_2|>\n\n<|body_start_3|>\n id = request.GET.get('id', '')\n try:\n obj = InterfaceMange.objects.filter(id=id)\n obj.delete()\n return Response({'code': 1000, 'data': '删除接口成功'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n<|end_body_3|>\n", "class_docstring": "", "class_name": "InterfaceManageList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InterfaceManageList:\n\n def post(self, request, *args, **kwargs):\n \"\"\"新建接口\"\"\"\n <|body_0|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"编辑接口\"\"\"\n <|body_1|>\n\n def get(self, request, *args, **kwargs):\n \"\"\"获取接口详情\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"删除接口\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.data\n if InterfaceMange.objects.filter(interfaceName=data['interfaceName']):\n return Response({'code': 1001, 'data': '接口已存在'})\n try:\n serializer = InterfaceMangeListSer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '新建接口成功'})\n else:\n return Response({'code': 1001, 'data': '参数有误'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n obj = InterfaceMange.objects.filter(id=data['id']).first()\n if len(data) == 2:\n parent = InterfaceMange.objects.filter(id=data['nodeId']).first().parent.id\n data = {'parent': parent}\n serializer = UpdateTreeNodeSer(obj, data=data)\n else:\n serializer = InterfaceMangeListSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '编辑接口成功'})\n else:\n return Response({'code': 1001, 'data': '编辑接口失败'})\n<|end_body_1|>\n\n<|body_start_2|>\n id = request.GET.get('id')\n obj = InterfaceMange.objects.filter(id=id)\n data = InterfaceMangeListSer(obj, many=True).data\n return Response(data[0])\n<|end_body_2|>\n\n<|body_start_3|>\n id = request.GET.get('id', '')\n try:\n obj = InterfaceMange.objects.filter(id=id)\n obj.delete()\n return Response({'code': 1000, 'data': '删除接口成功'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000200", "length_bytes": 9587, "license_type": "no_license", "methods": [{"docstring": "新建接口", "name": "post", "signature": "def post(self, request, *args, **kwargs)"}, {"docstring": "编辑接口", "name": "put", "signature": "def put(self, request, *args, **kwargs)"}, {"docstring": "获取接口详情", "name": "get", "signature": "def get(self, request, *args, **kwargs)"}, {"docstring": "删除接口", "name": "delete", "signature": "def delete(self, request, *args, **kwargs)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_002262", "prompt": "Implement the Python class `InterfaceManageList` described below.\n\nClass description:\nImplement the InterfaceManageList class.\n\nMethod signatures and docstrings:\n- def post(self, request, *args, **kwargs): 新建接口\n- def put(self, request, *args, **kwargs): 编辑接口\n- def get(self, request, *args, **kwargs): 获取接口详情\n- def delete(self, request, *args, **kwargs): 删除接口", "prompted_full_text": "Implement the Python class `InterfaceManageList` described below.\n\nClass description:\nImplement the InterfaceManageList class.\n\nMethod signatures and docstrings:\n- def post(self, request, *args, **kwargs): 新建接口\n- def put(self, request, *args, **kwargs): 编辑接口\n- def get(self, request, *args, **kwargs): 获取接口详情\n- def delete(self, request, *args, **kwargs): 删除接口\n\n<|skeleton|>\nclass InterfaceManageList:\n\n def post(self, request, *args, **kwargs):\n \"\"\"新建接口\"\"\"\n <|body_0|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"编辑接口\"\"\"\n <|body_1|>\n\n def get(self, request, *args, **kwargs):\n \"\"\"获取接口详情\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"删除接口\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.data\n if InterfaceMange.objects.filter(interfaceName=data['interfaceName']):\n return Response({'code': 1001, 'data': '接口已存在'})\n try:\n serializer = InterfaceMangeListSer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '新建接口成功'})\n else:\n return Response({'code': 1001, 'data': '参数有误'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.data\n obj = InterfaceMange.objects.filter(id=data['id']).first()\n if len(data) == 2:\n parent = InterfaceMange.objects.filter(id=data['nodeId']).first().parent.id\n data = {'parent': parent}\n serializer = UpdateTreeNodeSer(obj, data=data)\n else:\n serializer = InterfaceMangeListSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '编辑接口成功'})\n else:\n return Response({'code': 1001, 'data': '编辑接口失败'})\n<|end_body_1|>\n\n<|body_start_2|>\n id = request.GET.get('id')\n obj = InterfaceMange.objects.filter(id=id)\n data = InterfaceMangeListSer(obj, many=True).data\n return Response(data[0])\n<|end_body_2|>\n\n<|body_start_3|>\n id = request.GET.get('id', '')\n try:\n obj = InterfaceMange.objects.filter(id=id)\n obj.delete()\n return Response({'code': 1000, 'data': '删除接口成功'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n<|end_body_3|>\n", "revision_id": "f2523d6e51cde1b53ac6f453f8066b4b90c523b9", "skeleton": "<|skeleton|>\nclass InterfaceManageList:\n\n def post(self, request, *args, **kwargs):\n \"\"\"新建接口\"\"\"\n <|body_0|>\n\n def put(self, request, *args, **kwargs):\n \"\"\"编辑接口\"\"\"\n <|body_1|>\n\n def get(self, request, *args, **kwargs):\n \"\"\"获取接口详情\"\"\"\n <|body_2|>\n\n def delete(self, request, *args, **kwargs):\n \"\"\"删除接口\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InterfaceManageList:\n def post(self, request, *args, **kwargs):\n \"\"\"新建接口\"\"\"\n data = request.data\n if InterfaceMange.objects.filter(interfaceName=data['interfaceName']):\n return Response({'code': 1001, 'data': '接口已存在'})\n try:\n serializer = InterfaceMangeListSer(data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '新建接口成功'})\n else:\n return Response({'code': 1001, 'data': '参数有误'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n\n def put(self, request, *args, **kwargs):\n \"\"\"编辑接口\"\"\"\n data = request.data\n obj = InterfaceMange.objects.filter(id=data['id']).first()\n if len(data) == 2:\n parent = InterfaceMange.objects.filter(id=data['nodeId']).first().parent.id\n data = {'parent': parent}\n serializer = UpdateTreeNodeSer(obj, data=data)\n else:\n serializer = InterfaceMangeListSer(obj, data=data)\n if serializer.is_valid():\n serializer.save()\n return Response({'code': 1000, 'data': '编辑接口成功'})\n else:\n return Response({'code': 1001, 'data': '编辑接口失败'})\n\n def get(self, request, *args, **kwargs):\n \"\"\"获取接口详情\"\"\"\n id = request.GET.get('id')\n obj = InterfaceMange.objects.filter(id=id)\n data = InterfaceMangeListSer(obj, many=True).data\n return Response(data[0])\n\n def delete(self, request, *args, **kwargs):\n \"\"\"删除接口\"\"\"\n id = request.GET.get('id', '')\n try:\n obj = InterfaceMange.objects.filter(id=id)\n obj.delete()\n return Response({'code': 1000, 'data': '删除接口成功'})\n except Exception as e:\n return Response({'code': 1001, 'data': str(e)})\n", "source": "the_stack_v2_python_sparse", "source_path": "api/interface/rest/interfaceManage.py", "source_repo": "zhuzhanhao1/backend", "split": "test", "star_events_count": 0} {"blob_id": "e8a4c18dea01d6bc736d11eaab7c49bb3d381a3c", "bodies": ["l = ListNode(0)\nwhile l1:\n l.next = ListNode(0)\n if l1.value + l2.value >= 10:\n l.value = l1.value + l2.value - 10\n l.next.value = 1\n else:\n l.value = l.value + l1.value + l2.value\n print('l.value:{0}'.format(l.value))\n l1 = l1.next\n l2 = l2.next\nreturn l", "i, s1, l = (1, 0, l1)\nwhile l:\n s1 += l.value * i\n i = i * 10\n l = l.next\ni, s2, l = (1, 0, l2)\nwhile l:\n s2 += l.value * i\n i *= 10\n l = l.next\ns = s1 + s2\ns = str(s)[::-1]\nres = [ListNode(int(ch)) for ch in s]\nfor i in range(len(res) - 1):\n res[i].next = res[i + 1]\nreturn res[0]"], "bodies_text": "<|body_start_0|>\n l = ListNode(0)\n while l1:\n l.next = ListNode(0)\n if l1.value + l2.value >= 10:\n l.value = l1.value + l2.value - 10\n l.next.value = 1\n else:\n l.value = l.value + l1.value + l2.value\n print('l.value:{0}'.format(l.value))\n l1 = l1.next\n l2 = l2.next\n return l\n<|end_body_0|>\n\n<|body_start_1|>\n i, s1, l = (1, 0, l1)\n while l:\n s1 += l.value * i\n i = i * 10\n l = l.next\n i, s2, l = (1, 0, l2)\n while l:\n s2 += l.value * i\n i *= 10\n l = l.next\n s = s1 + s2\n s = str(s)[::-1]\n res = [ListNode(int(ch)) for ch in s]\n for i in range(len(res) - 1):\n res[i].next = res[i + 1]\n return res[0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def addTwoNumbers(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n <|body_0|>\n\n def addTwoNumbers2(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = ListNode(0)\n while l1:\n l.next = ListNode(0)\n if l1.value + l2.value >= 10:\n l.value = l1.value + l2.value - 10\n l.next.value = 1\n else:\n l.value = l.value + l1.value + l2.value\n print('l.value:{0}'.format(l.value))\n l1 = l1.next\n l2 = l2.next\n return l\n<|end_body_0|>\n\n<|body_start_1|>\n i, s1, l = (1, 0, l1)\n while l:\n s1 += l.value * i\n i = i * 10\n l = l.next\n i, s2, l = (1, 0, l2)\n while l:\n s2 += l.value * i\n i *= 10\n l = l.next\n s = s1 + s2\n s = str(s)[::-1]\n res = [ListNode(int(ch)) for ch in s]\n for i in range(len(res) - 1):\n res[i].next = res[i + 1]\n return res[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000201", "length_bytes": 2007, "license_type": "no_license", "methods": [{"docstring": ":param l1: ListNode :param l2: ListNode :return: ListNode", "name": "addTwoNumbers", "signature": "def addTwoNumbers(self, l1, l2)"}, {"docstring": ":param l1: ListNode :param l2: ListNode :return: ListNode", "name": "addTwoNumbers2", "signature": "def addTwoNumbers2(self, l1, l2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_001220", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def addTwoNumbers(self, l1, l2): :param l1: ListNode :param l2: ListNode :return: ListNode\n- def addTwoNumbers2(self, l1, l2): :param l1: ListNode :param l2: ListNode :return: ListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def addTwoNumbers(self, l1, l2): :param l1: ListNode :param l2: ListNode :return: ListNode\n- def addTwoNumbers2(self, l1, l2): :param l1: ListNode :param l2: ListNode :return: ListNode\n\n<|skeleton|>\nclass Solution:\n\n def addTwoNumbers(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n <|body_0|>\n\n def addTwoNumbers2(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = ListNode(0)\n while l1:\n l.next = ListNode(0)\n if l1.value + l2.value >= 10:\n l.value = l1.value + l2.value - 10\n l.next.value = 1\n else:\n l.value = l.value + l1.value + l2.value\n print('l.value:{0}'.format(l.value))\n l1 = l1.next\n l2 = l2.next\n return l\n<|end_body_0|>\n\n<|body_start_1|>\n i, s1, l = (1, 0, l1)\n while l:\n s1 += l.value * i\n i = i * 10\n l = l.next\n i, s2, l = (1, 0, l2)\n while l:\n s2 += l.value * i\n i *= 10\n l = l.next\n s = s1 + s2\n s = str(s)[::-1]\n res = [ListNode(int(ch)) for ch in s]\n for i in range(len(res) - 1):\n res[i].next = res[i + 1]\n return res[0]\n<|end_body_1|>\n", "revision_id": "4f2802d4773eddd2a2e06e61c51463056886b730", "skeleton": "<|skeleton|>\nclass Solution:\n\n def addTwoNumbers(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n <|body_0|>\n\n def addTwoNumbers2(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def addTwoNumbers(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n l = ListNode(0)\n while l1:\n l.next = ListNode(0)\n if l1.value + l2.value >= 10:\n l.value = l1.value + l2.value - 10\n l.next.value = 1\n else:\n l.value = l.value + l1.value + l2.value\n print('l.value:{0}'.format(l.value))\n l1 = l1.next\n l2 = l2.next\n return l\n\n def addTwoNumbers2(self, l1, l2):\n \"\"\":param l1: ListNode :param l2: ListNode :return: ListNode\"\"\"\n i, s1, l = (1, 0, l1)\n while l:\n s1 += l.value * i\n i = i * 10\n l = l.next\n i, s2, l = (1, 0, l2)\n while l:\n s2 += l.value * i\n i *= 10\n l = l.next\n s = s1 + s2\n s = str(s)[::-1]\n res = [ListNode(int(ch)) for ch in s]\n for i in range(len(res) - 1):\n res[i].next = res[i + 1]\n return res[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode2/57_addTwoNumbers(unk).py", "source_repo": "Yara7L/python_algorithm", "split": "test", "star_events_count": 0} {"blob_id": "da050846259ee4899a527550890a95aff9533b78", "bodies": ["plugins_table = Table(['NAME', 'DESCRIPTION'])\nplugins = cli.util.import_modules(cli.util.join_plugin_paths(self.settings, self.config), 'plugins')\nfor plugin in plugins:\n plugins_table.add_row([cli.util.get_module(plugins, plugin).PLUGIN_NAME, cli.util.get_module(plugins, plugin).SHORT_HELP])\nsys.stdout.write('{}\\n'.format(plugins_table))", "config_file = self.config.path\nif not os.path.isfile(config_file):\n raise CLIException(\"Unable to show the config file, '{path}' does not exist\".format(path=config_file))\nwith open(config_file, 'r') as stream:\n try:\n sys.stdout.write('{stream}\\n'.format(stream=stream.read().rstrip()))\n except Exception as exception:\n raise CLIException(\"Unable to read config file '{path}': {error}\".format(path=config_file, error=exception))"], "bodies_text": "<|body_start_0|>\n plugins_table = Table(['NAME', 'DESCRIPTION'])\n plugins = cli.util.import_modules(cli.util.join_plugin_paths(self.settings, self.config), 'plugins')\n for plugin in plugins:\n plugins_table.add_row([cli.util.get_module(plugins, plugin).PLUGIN_NAME, cli.util.get_module(plugins, plugin).SHORT_HELP])\n sys.stdout.write('{}\\n'.format(plugins_table))\n<|end_body_0|>\n\n<|body_start_1|>\n config_file = self.config.path\n if not os.path.isfile(config_file):\n raise CLIException(\"Unable to show the config file, '{path}' does not exist\".format(path=config_file))\n with open(config_file, 'r') as stream:\n try:\n sys.stdout.write('{stream}\\n'.format(stream=stream.read().rstrip()))\n except Exception as exception:\n raise CLIException(\"Unable to read config file '{path}': {error}\".format(path=config_file, error=exception))\n<|end_body_1|>\n", "class_docstring": "The config plugin.", "class_name": "Config", "detected_licenses": ["Apache-2.0", "GPL-2.0-or-later", "BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-protobuf", "LGPL-2.1-only", "PSF-2.0", "BSL-1.0", "MIT", "LicenseRef-scancode-warranty-disclaimer", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Config:\n \"\"\"The config plugin.\"\"\"\n\n def plugins(self, argv):\n \"\"\"Parse and load the builtin plugins and the ones in the configuration file. If this method is called using 'mesos config plugins', it displays the plugins that can be used.\"\"\"\n <|body_0|>\n\n def show(self, argv):\n \"\"\"Show the contents of the configuration file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plugins_table = Table(['NAME', 'DESCRIPTION'])\n plugins = cli.util.import_modules(cli.util.join_plugin_paths(self.settings, self.config), 'plugins')\n for plugin in plugins:\n plugins_table.add_row([cli.util.get_module(plugins, plugin).PLUGIN_NAME, cli.util.get_module(plugins, plugin).SHORT_HELP])\n sys.stdout.write('{}\\n'.format(plugins_table))\n<|end_body_0|>\n\n<|body_start_1|>\n config_file = self.config.path\n if not os.path.isfile(config_file):\n raise CLIException(\"Unable to show the config file, '{path}' does not exist\".format(path=config_file))\n with open(config_file, 'r') as stream:\n try:\n sys.stdout.write('{stream}\\n'.format(stream=stream.read().rstrip()))\n except Exception as exception:\n raise CLIException(\"Unable to read config file '{path}': {error}\".format(path=config_file, error=exception))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000202", "length_bytes": 3245, "license_type": "permissive", "methods": [{"docstring": "Parse and load the builtin plugins and the ones in the configuration file. If this method is called using 'mesos config plugins', it displays the plugins that can be used.", "name": "plugins", "signature": "def plugins(self, argv)"}, {"docstring": "Show the contents of the configuration file.", "name": "show", "signature": "def show(self, argv)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009508", "prompt": "Implement the Python class `Config` described below.\n\nClass description:\nThe config plugin.\n\nMethod signatures and docstrings:\n- def plugins(self, argv): Parse and load the builtin plugins and the ones in the configuration file. If this method is called using 'mesos config plugins', it displays the plugins that can be used.\n- def show(self, argv): Show the contents of the configuration file.", "prompted_full_text": "Implement the Python class `Config` described below.\n\nClass description:\nThe config plugin.\n\nMethod signatures and docstrings:\n- def plugins(self, argv): Parse and load the builtin plugins and the ones in the configuration file. If this method is called using 'mesos config plugins', it displays the plugins that can be used.\n- def show(self, argv): Show the contents of the configuration file.\n\n<|skeleton|>\nclass Config:\n \"\"\"The config plugin.\"\"\"\n\n def plugins(self, argv):\n \"\"\"Parse and load the builtin plugins and the ones in the configuration file. If this method is called using 'mesos config plugins', it displays the plugins that can be used.\"\"\"\n <|body_0|>\n\n def show(self, argv):\n \"\"\"Show the contents of the configuration file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n plugins_table = Table(['NAME', 'DESCRIPTION'])\n plugins = cli.util.import_modules(cli.util.join_plugin_paths(self.settings, self.config), 'plugins')\n for plugin in plugins:\n plugins_table.add_row([cli.util.get_module(plugins, plugin).PLUGIN_NAME, cli.util.get_module(plugins, plugin).SHORT_HELP])\n sys.stdout.write('{}\\n'.format(plugins_table))\n<|end_body_0|>\n\n<|body_start_1|>\n config_file = self.config.path\n if not os.path.isfile(config_file):\n raise CLIException(\"Unable to show the config file, '{path}' does not exist\".format(path=config_file))\n with open(config_file, 'r') as stream:\n try:\n sys.stdout.write('{stream}\\n'.format(stream=stream.read().rstrip()))\n except Exception as exception:\n raise CLIException(\"Unable to read config file '{path}': {error}\".format(path=config_file, error=exception))\n<|end_body_1|>\n", "revision_id": "8856d6fba11281df898fd65b0cafa1e20eb90fe8", "skeleton": "<|skeleton|>\nclass Config:\n \"\"\"The config plugin.\"\"\"\n\n def plugins(self, argv):\n \"\"\"Parse and load the builtin plugins and the ones in the configuration file. If this method is called using 'mesos config plugins', it displays the plugins that can be used.\"\"\"\n <|body_0|>\n\n def show(self, argv):\n \"\"\"Show the contents of the configuration file.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Config:\n \"\"\"The config plugin.\"\"\"\n\n def plugins(self, argv):\n \"\"\"Parse and load the builtin plugins and the ones in the configuration file. If this method is called using 'mesos config plugins', it displays the plugins that can be used.\"\"\"\n plugins_table = Table(['NAME', 'DESCRIPTION'])\n plugins = cli.util.import_modules(cli.util.join_plugin_paths(self.settings, self.config), 'plugins')\n for plugin in plugins:\n plugins_table.add_row([cli.util.get_module(plugins, plugin).PLUGIN_NAME, cli.util.get_module(plugins, plugin).SHORT_HELP])\n sys.stdout.write('{}\\n'.format(plugins_table))\n\n def show(self, argv):\n \"\"\"Show the contents of the configuration file.\"\"\"\n config_file = self.config.path\n if not os.path.isfile(config_file):\n raise CLIException(\"Unable to show the config file, '{path}' does not exist\".format(path=config_file))\n with open(config_file, 'r') as stream:\n try:\n sys.stdout.write('{stream}\\n'.format(stream=stream.read().rstrip()))\n except Exception as exception:\n raise CLIException(\"Unable to read config file '{path}': {error}\".format(path=config_file, error=exception))\n", "source": "the_stack_v2_python_sparse", "source_path": "src/python/cli_new/lib/cli/plugins/config/main.py", "source_repo": "apache/mesos", "split": "test", "star_events_count": 4860} {"blob_id": "c1a1d7fcd19dc43ac79674b1797547c352977614", "bodies": ["self.k = k\nself.nums = sorted(nums)\nself.length = len(self.nums)", "self.nums.append(val)\nself.nums.sort()\nself.length += 1\nreturn self.nums[-self.k] if 0 < self.k <= self.length else self.nums[-1]"], "bodies_text": "<|body_start_0|>\n self.k = k\n self.nums = sorted(nums)\n self.length = len(self.nums)\n<|end_body_0|>\n\n<|body_start_1|>\n self.nums.append(val)\n self.nums.sort()\n self.length += 1\n return self.nums[-self.k] if 0 < self.k <= self.length else self.nums[-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "KthLargest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KthLargest:\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.nums = sorted(nums)\n self.length = len(self.nums)\n<|end_body_0|>\n\n<|body_start_1|>\n self.nums.append(val)\n self.nums.sort()\n self.length += 1\n return self.nums[-self.k] if 0 < self.k <= self.length else self.nums[-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000203", "length_bytes": 745, "license_type": "no_license", "methods": [{"docstring": ":type k: int :type nums: List[int]", "name": "__init__", "signature": "def __init__(self, k, nums)"}, {"docstring": ":type val: int :rtype: int", "name": "add", "signature": "def add(self, val)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041441", "prompt": "Implement the Python class `KthLargest` described below.\n\nClass description:\nImplement the KthLargest class.\n\nMethod signatures and docstrings:\n- def __init__(self, k, nums): :type k: int :type nums: List[int]\n- def add(self, val): :type val: int :rtype: int", "prompted_full_text": "Implement the Python class `KthLargest` described below.\n\nClass description:\nImplement the KthLargest class.\n\nMethod signatures and docstrings:\n- def __init__(self, k, nums): :type k: int :type nums: List[int]\n- def add(self, val): :type val: int :rtype: int\n\n<|skeleton|>\nclass KthLargest:\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.k = k\n self.nums = sorted(nums)\n self.length = len(self.nums)\n<|end_body_0|>\n\n<|body_start_1|>\n self.nums.append(val)\n self.nums.sort()\n self.length += 1\n return self.nums[-self.k] if 0 < self.k <= self.length else self.nums[-1]\n<|end_body_1|>\n", "revision_id": "70bdd75b6af2e1811c1beab22050c01d28d7373e", "skeleton": "<|skeleton|>\nclass KthLargest:\n\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n <|body_0|>\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class KthLargest:\n def __init__(self, k, nums):\n \"\"\":type k: int :type nums: List[int]\"\"\"\n self.k = k\n self.nums = sorted(nums)\n self.length = len(self.nums)\n\n def add(self, val):\n \"\"\":type val: int :rtype: int\"\"\"\n self.nums.append(val)\n self.nums.sort()\n self.length += 1\n return self.nums[-self.k] if 0 < self.k <= self.length else self.nums[-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "python/leetcode_bak/703_Kth_Largest_Element_in_a_Stream.py", "source_repo": "bobcaoge/my-code", "split": "test", "star_events_count": 0} {"blob_id": "371c243d136a6e7d5e280afc4ace3a7c59b26b1c", "bodies": ["DataUpdateCoordinator.__init__(self, hass, _LOGGER, name=f'{DOMAIN}-{device.device_info.name}', update_interval=timedelta(seconds=60))\nself.device = device\nself._error_count = 0", "try:\n await self.device.update_state()\nexcept DeviceTimeoutError as error:\n self._error_count += 1\n if self.last_update_success and self._error_count >= MAX_ERRORS:\n _LOGGER.warning('Device is unavailable: %s (%s)', self.name, self.device.device_info)\n raise UpdateFailed(error) from error\nelse:\n if not self.last_update_success and self._error_count:\n _LOGGER.warning('Device is available: %s (%s)', self.name, str(self.device.device_info))\n self._error_count = 0", "try:\n return await self.device.push_state_update()\nexcept DeviceTimeoutError:\n _LOGGER.warning('Timeout send state update to: %s (%s)', self.name, self.device.device_info)"], "bodies_text": "<|body_start_0|>\n DataUpdateCoordinator.__init__(self, hass, _LOGGER, name=f'{DOMAIN}-{device.device_info.name}', update_interval=timedelta(seconds=60))\n self.device = device\n self._error_count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n await self.device.update_state()\n except DeviceTimeoutError as error:\n self._error_count += 1\n if self.last_update_success and self._error_count >= MAX_ERRORS:\n _LOGGER.warning('Device is unavailable: %s (%s)', self.name, self.device.device_info)\n raise UpdateFailed(error) from error\n else:\n if not self.last_update_success and self._error_count:\n _LOGGER.warning('Device is available: %s (%s)', self.name, str(self.device.device_info))\n self._error_count = 0\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return await self.device.push_state_update()\n except DeviceTimeoutError:\n _LOGGER.warning('Timeout send state update to: %s (%s)', self.name, self.device.device_info)\n<|end_body_2|>\n", "class_docstring": "Manages polling for state changes from the device.", "class_name": "DeviceDataUpdateCoordinator", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeviceDataUpdateCoordinator:\n \"\"\"Manages polling for state changes from the device.\"\"\"\n\n def __init__(self, hass: HomeAssistant, device: Device):\n \"\"\"Initialize the data update coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self):\n \"\"\"Update the state of the device.\"\"\"\n <|body_1|>\n\n async def push_state_update(self):\n \"\"\"Send state updates to the physical device.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n DataUpdateCoordinator.__init__(self, hass, _LOGGER, name=f'{DOMAIN}-{device.device_info.name}', update_interval=timedelta(seconds=60))\n self.device = device\n self._error_count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n await self.device.update_state()\n except DeviceTimeoutError as error:\n self._error_count += 1\n if self.last_update_success and self._error_count >= MAX_ERRORS:\n _LOGGER.warning('Device is unavailable: %s (%s)', self.name, self.device.device_info)\n raise UpdateFailed(error) from error\n else:\n if not self.last_update_success and self._error_count:\n _LOGGER.warning('Device is available: %s (%s)', self.name, str(self.device.device_info))\n self._error_count = 0\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return await self.device.push_state_update()\n except DeviceTimeoutError:\n _LOGGER.warning('Timeout send state update to: %s (%s)', self.name, self.device.device_info)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000204", "length_bytes": 3223, "license_type": "permissive", "methods": [{"docstring": "Initialize the data update coordinator.", "name": "__init__", "signature": "def __init__(self, hass: HomeAssistant, device: Device)"}, {"docstring": "Update the state of the device.", "name": "_async_update_data", "signature": "async def _async_update_data(self)"}, {"docstring": "Send state updates to the physical device.", "name": "push_state_update", "signature": "async def push_state_update(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_002123", "prompt": "Implement the Python class `DeviceDataUpdateCoordinator` described below.\n\nClass description:\nManages polling for state changes from the device.\n\nMethod signatures and docstrings:\n- def __init__(self, hass: HomeAssistant, device: Device): Initialize the data update coordinator.\n- async def _async_update_data(self): Update the state of the device.\n- async def push_state_update(self): Send state updates to the physical device.", "prompted_full_text": "Implement the Python class `DeviceDataUpdateCoordinator` described below.\n\nClass description:\nManages polling for state changes from the device.\n\nMethod signatures and docstrings:\n- def __init__(self, hass: HomeAssistant, device: Device): Initialize the data update coordinator.\n- async def _async_update_data(self): Update the state of the device.\n- async def push_state_update(self): Send state updates to the physical device.\n\n<|skeleton|>\nclass DeviceDataUpdateCoordinator:\n \"\"\"Manages polling for state changes from the device.\"\"\"\n\n def __init__(self, hass: HomeAssistant, device: Device):\n \"\"\"Initialize the data update coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self):\n \"\"\"Update the state of the device.\"\"\"\n <|body_1|>\n\n async def push_state_update(self):\n \"\"\"Send state updates to the physical device.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n DataUpdateCoordinator.__init__(self, hass, _LOGGER, name=f'{DOMAIN}-{device.device_info.name}', update_interval=timedelta(seconds=60))\n self.device = device\n self._error_count = 0\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n await self.device.update_state()\n except DeviceTimeoutError as error:\n self._error_count += 1\n if self.last_update_success and self._error_count >= MAX_ERRORS:\n _LOGGER.warning('Device is unavailable: %s (%s)', self.name, self.device.device_info)\n raise UpdateFailed(error) from error\n else:\n if not self.last_update_success and self._error_count:\n _LOGGER.warning('Device is available: %s (%s)', self.name, str(self.device.device_info))\n self._error_count = 0\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n return await self.device.push_state_update()\n except DeviceTimeoutError:\n _LOGGER.warning('Timeout send state update to: %s (%s)', self.name, self.device.device_info)\n<|end_body_2|>\n", "revision_id": "4ab0151fb1cbefb31def23ba850e197da0a5027f", "skeleton": "<|skeleton|>\nclass DeviceDataUpdateCoordinator:\n \"\"\"Manages polling for state changes from the device.\"\"\"\n\n def __init__(self, hass: HomeAssistant, device: Device):\n \"\"\"Initialize the data update coordinator.\"\"\"\n <|body_0|>\n\n async def _async_update_data(self):\n \"\"\"Update the state of the device.\"\"\"\n <|body_1|>\n\n async def push_state_update(self):\n \"\"\"Send state updates to the physical device.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DeviceDataUpdateCoordinator:\n \"\"\"Manages polling for state changes from the device.\"\"\"\n\n def __init__(self, hass: HomeAssistant, device: Device):\n \"\"\"Initialize the data update coordinator.\"\"\"\n DataUpdateCoordinator.__init__(self, hass, _LOGGER, name=f'{DOMAIN}-{device.device_info.name}', update_interval=timedelta(seconds=60))\n self.device = device\n self._error_count = 0\n\n async def _async_update_data(self):\n \"\"\"Update the state of the device.\"\"\"\n try:\n await self.device.update_state()\n except DeviceTimeoutError as error:\n self._error_count += 1\n if self.last_update_success and self._error_count >= MAX_ERRORS:\n _LOGGER.warning('Device is unavailable: %s (%s)', self.name, self.device.device_info)\n raise UpdateFailed(error) from error\n else:\n if not self.last_update_success and self._error_count:\n _LOGGER.warning('Device is available: %s (%s)', self.name, str(self.device.device_info))\n self._error_count = 0\n\n async def push_state_update(self):\n \"\"\"Send state updates to the physical device.\"\"\"\n try:\n return await self.device.push_state_update()\n except DeviceTimeoutError:\n _LOGGER.warning('Timeout send state update to: %s (%s)', self.name, self.device.device_info)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/gree/bridge.py", "source_repo": "turbokongen/home-assistant", "split": "test", "star_events_count": 4} {"blob_id": "89d41eadf011da32289d8e71e4f771e6950d6787", "bodies": ["self.convertUnits = convert\nself.eventsDir = os.path.join(processedDataDir, 'events/Neuropix-PXI-100.0/TTL_1/')\nself.infoDir = os.path.join(processedDataDir, 'info')\nchannelsFile = 'channels.npy'\nchannelStatesFile = 'channel_states.npy'\nfullWordsFile = 'full_words.npy'\ntimestampsFile = 'timestamps.npy'\nself.firstTimestamp, self.samplingRate = read_recording_info(processedDataDir)\nself.channels = np.load(os.path.join(self.eventsDir, channelsFile))\nself.channelStates = np.load(os.path.join(self.eventsDir, channelStatesFile))\nself.fullWords = np.load(os.path.join(self.eventsDir, fullWordsFile))\nself.timestamps = np.load(os.path.join(self.eventsDir, timestampsFile))\nif self.convertUnits:\n self.timestamps = (self.timestamps - self.firstTimestamp) / self.samplingRate", "thisStateThisChannel = (self.channelStates == channelState) & (self.channels == eventChannel)\neventOnsetTimes = self.timestamps[thisStateThisChannel]\nreturn eventOnsetTimes"], "bodies_text": "<|body_start_0|>\n self.convertUnits = convert\n self.eventsDir = os.path.join(processedDataDir, 'events/Neuropix-PXI-100.0/TTL_1/')\n self.infoDir = os.path.join(processedDataDir, 'info')\n channelsFile = 'channels.npy'\n channelStatesFile = 'channel_states.npy'\n fullWordsFile = 'full_words.npy'\n timestampsFile = 'timestamps.npy'\n self.firstTimestamp, self.samplingRate = read_recording_info(processedDataDir)\n self.channels = np.load(os.path.join(self.eventsDir, channelsFile))\n self.channelStates = np.load(os.path.join(self.eventsDir, channelStatesFile))\n self.fullWords = np.load(os.path.join(self.eventsDir, fullWordsFile))\n self.timestamps = np.load(os.path.join(self.eventsDir, timestampsFile))\n if self.convertUnits:\n self.timestamps = (self.timestamps - self.firstTimestamp) / self.samplingRate\n<|end_body_0|>\n\n<|body_start_1|>\n thisStateThisChannel = (self.channelStates == channelState) & (self.channels == eventChannel)\n eventOnsetTimes = self.timestamps[thisStateThisChannel]\n return eventOnsetTimes\n<|end_body_1|>\n", "class_docstring": "Class for loading TTL events. Note that timestamps for events are stored relative to the start of acquisition (play button), not recording. This class can make the right conversion to match spike data from kilosort.", "class_name": "Events", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Events:\n \"\"\"Class for loading TTL events. Note that timestamps for events are stored relative to the start of acquisition (play button), not recording. This class can make the right conversion to match spike data from kilosort.\"\"\"\n\n def __init__(self, processedDataDir, convert=True):\n \"\"\"Args: processedDataDir (str): path to root of neuropixels raw data for a given session. convert(bool): if True, convert timestamps to seconds.\"\"\"\n <|body_0|>\n\n def get_onset_times(self, eventChannel=1, channelState=1):\n \"\"\"Get the onset times for specific events. Args: eventChannel (int): The openEphys DIO channel that recieves the event. channelState (int): 1 for onset, -1 for offset Returns: eventOnsetTimes (array): An array of the timestamps of the event onsets.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.convertUnits = convert\n self.eventsDir = os.path.join(processedDataDir, 'events/Neuropix-PXI-100.0/TTL_1/')\n self.infoDir = os.path.join(processedDataDir, 'info')\n channelsFile = 'channels.npy'\n channelStatesFile = 'channel_states.npy'\n fullWordsFile = 'full_words.npy'\n timestampsFile = 'timestamps.npy'\n self.firstTimestamp, self.samplingRate = read_recording_info(processedDataDir)\n self.channels = np.load(os.path.join(self.eventsDir, channelsFile))\n self.channelStates = np.load(os.path.join(self.eventsDir, channelStatesFile))\n self.fullWords = np.load(os.path.join(self.eventsDir, fullWordsFile))\n self.timestamps = np.load(os.path.join(self.eventsDir, timestampsFile))\n if self.convertUnits:\n self.timestamps = (self.timestamps - self.firstTimestamp) / self.samplingRate\n<|end_body_0|>\n\n<|body_start_1|>\n thisStateThisChannel = (self.channelStates == channelState) & (self.channels == eventChannel)\n eventOnsetTimes = self.timestamps[thisStateThisChannel]\n return eventOnsetTimes\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000205", "length_bytes": 17989, "license_type": "no_license", "methods": [{"docstring": "Args: processedDataDir (str): path to root of neuropixels raw data for a given session. convert(bool): if True, convert timestamps to seconds.", "name": "__init__", "signature": "def __init__(self, processedDataDir, convert=True)"}, {"docstring": "Get the onset times for specific events. Args: eventChannel (int): The openEphys DIO channel that recieves the event. channelState (int): 1 for onset, -1 for offset Returns: eventOnsetTimes (array): An array of the timestamps of the event onsets.", "name": "get_onset_times", "signature": "def get_onset_times(self, eventChannel=1, channelState=1)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039321", "prompt": "Implement the Python class `Events` described below.\n\nClass description:\nClass for loading TTL events. Note that timestamps for events are stored relative to the start of acquisition (play button), not recording. This class can make the right conversion to match spike data from kilosort.\n\nMethod signatures and docstrings:\n- def __init__(self, processedDataDir, convert=True): Args: processedDataDir (str): path to root of neuropixels raw data for a given session. convert(bool): if True, convert timestamps to seconds.\n- def get_onset_times(self, eventChannel=1, channelState=1): Get the onset times for specific events. Args: eventChannel (int): The openEphys DIO channel that recieves the event. channelState (int): 1 for onset, -1 for offset Returns: eventOnsetTimes (array): An array of the timestamps of the event onsets.", "prompted_full_text": "Implement the Python class `Events` described below.\n\nClass description:\nClass for loading TTL events. Note that timestamps for events are stored relative to the start of acquisition (play button), not recording. This class can make the right conversion to match spike data from kilosort.\n\nMethod signatures and docstrings:\n- def __init__(self, processedDataDir, convert=True): Args: processedDataDir (str): path to root of neuropixels raw data for a given session. convert(bool): if True, convert timestamps to seconds.\n- def get_onset_times(self, eventChannel=1, channelState=1): Get the onset times for specific events. Args: eventChannel (int): The openEphys DIO channel that recieves the event. channelState (int): 1 for onset, -1 for offset Returns: eventOnsetTimes (array): An array of the timestamps of the event onsets.\n\n<|skeleton|>\nclass Events:\n \"\"\"Class for loading TTL events. Note that timestamps for events are stored relative to the start of acquisition (play button), not recording. This class can make the right conversion to match spike data from kilosort.\"\"\"\n\n def __init__(self, processedDataDir, convert=True):\n \"\"\"Args: processedDataDir (str): path to root of neuropixels raw data for a given session. convert(bool): if True, convert timestamps to seconds.\"\"\"\n <|body_0|>\n\n def get_onset_times(self, eventChannel=1, channelState=1):\n \"\"\"Get the onset times for specific events. Args: eventChannel (int): The openEphys DIO channel that recieves the event. channelState (int): 1 for onset, -1 for offset Returns: eventOnsetTimes (array): An array of the timestamps of the event onsets.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.convertUnits = convert\n self.eventsDir = os.path.join(processedDataDir, 'events/Neuropix-PXI-100.0/TTL_1/')\n self.infoDir = os.path.join(processedDataDir, 'info')\n channelsFile = 'channels.npy'\n channelStatesFile = 'channel_states.npy'\n fullWordsFile = 'full_words.npy'\n timestampsFile = 'timestamps.npy'\n self.firstTimestamp, self.samplingRate = read_recording_info(processedDataDir)\n self.channels = np.load(os.path.join(self.eventsDir, channelsFile))\n self.channelStates = np.load(os.path.join(self.eventsDir, channelStatesFile))\n self.fullWords = np.load(os.path.join(self.eventsDir, fullWordsFile))\n self.timestamps = np.load(os.path.join(self.eventsDir, timestampsFile))\n if self.convertUnits:\n self.timestamps = (self.timestamps - self.firstTimestamp) / self.samplingRate\n<|end_body_0|>\n\n<|body_start_1|>\n thisStateThisChannel = (self.channelStates == channelState) & (self.channels == eventChannel)\n eventOnsetTimes = self.timestamps[thisStateThisChannel]\n return eventOnsetTimes\n<|end_body_1|>\n", "revision_id": "0a4a0d2700427acf00de0b9ed66f0b64c02fdc43", "skeleton": "<|skeleton|>\nclass Events:\n \"\"\"Class for loading TTL events. Note that timestamps for events are stored relative to the start of acquisition (play button), not recording. This class can make the right conversion to match spike data from kilosort.\"\"\"\n\n def __init__(self, processedDataDir, convert=True):\n \"\"\"Args: processedDataDir (str): path to root of neuropixels raw data for a given session. convert(bool): if True, convert timestamps to seconds.\"\"\"\n <|body_0|>\n\n def get_onset_times(self, eventChannel=1, channelState=1):\n \"\"\"Get the onset times for specific events. Args: eventChannel (int): The openEphys DIO channel that recieves the event. channelState (int): 1 for onset, -1 for offset Returns: eventOnsetTimes (array): An array of the timestamps of the event onsets.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Events:\n \"\"\"Class for loading TTL events. Note that timestamps for events are stored relative to the start of acquisition (play button), not recording. This class can make the right conversion to match spike data from kilosort.\"\"\"\n\n def __init__(self, processedDataDir, convert=True):\n \"\"\"Args: processedDataDir (str): path to root of neuropixels raw data for a given session. convert(bool): if True, convert timestamps to seconds.\"\"\"\n self.convertUnits = convert\n self.eventsDir = os.path.join(processedDataDir, 'events/Neuropix-PXI-100.0/TTL_1/')\n self.infoDir = os.path.join(processedDataDir, 'info')\n channelsFile = 'channels.npy'\n channelStatesFile = 'channel_states.npy'\n fullWordsFile = 'full_words.npy'\n timestampsFile = 'timestamps.npy'\n self.firstTimestamp, self.samplingRate = read_recording_info(processedDataDir)\n self.channels = np.load(os.path.join(self.eventsDir, channelsFile))\n self.channelStates = np.load(os.path.join(self.eventsDir, channelStatesFile))\n self.fullWords = np.load(os.path.join(self.eventsDir, fullWordsFile))\n self.timestamps = np.load(os.path.join(self.eventsDir, timestampsFile))\n if self.convertUnits:\n self.timestamps = (self.timestamps - self.firstTimestamp) / self.samplingRate\n\n def get_onset_times(self, eventChannel=1, channelState=1):\n \"\"\"Get the onset times for specific events. Args: eventChannel (int): The openEphys DIO channel that recieves the event. channelState (int): 1 for onset, -1 for offset Returns: eventOnsetTimes (array): An array of the timestamps of the event onsets.\"\"\"\n thisStateThisChannel = (self.channelStates == channelState) & (self.channels == eventChannel)\n eventOnsetTimes = self.timestamps[thisStateThisChannel]\n return eventOnsetTimes\n", "source": "the_stack_v2_python_sparse", "source_path": "jaratoolbox/loadneuropix.py", "source_repo": "sjara/jaratoolbox", "split": "test", "star_events_count": 3} {"blob_id": "e162650efa1f011cb0dfdcd04107f4b2b6bfa46e", "bodies": ["super(MakeLayer2, self).__init__()\nself.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\nself.b = block(out_channels, out_channels, stride=1)\nself.c = block(out_channels, out_channels, stride=1)\nself.d = block(out_channels, out_channels, stride=1)\nself.e = block(out_channels, out_channels, stride=1)\nself.f = block(out_channels, out_channels, stride=1)", "x = self.a(x)\nx = self.b(x)\nx = self.c(x)\nx = self.d(x)\nx = self.e(x)\nx = self.f(x)\nreturn x"], "bodies_text": "<|body_start_0|>\n super(MakeLayer2, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\n self.b = block(out_channels, out_channels, stride=1)\n self.c = block(out_channels, out_channels, stride=1)\n self.d = block(out_channels, out_channels, stride=1)\n self.e = block(out_channels, out_channels, stride=1)\n self.f = block(out_channels, out_channels, stride=1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n x = self.d(x)\n x = self.e(x)\n x = self.f(x)\n return x\n<|end_body_1|>\n", "class_docstring": "MakeLayer2", "class_name": "MakeLayer2", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MakeLayer2:\n \"\"\"MakeLayer2\"\"\"\n\n def __init__(self, block, in_channels, out_channels, stride):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def construct(self, x):\n \"\"\"construct\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MakeLayer2, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\n self.b = block(out_channels, out_channels, stride=1)\n self.c = block(out_channels, out_channels, stride=1)\n self.d = block(out_channels, out_channels, stride=1)\n self.e = block(out_channels, out_channels, stride=1)\n self.f = block(out_channels, out_channels, stride=1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n x = self.d(x)\n x = self.e(x)\n x = self.f(x)\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000206", "length_bytes": 18023, "license_type": "no_license", "methods": [{"docstring": "init", "name": "__init__", "signature": "def __init__(self, block, in_channels, out_channels, stride)"}, {"docstring": "construct", "name": "construct", "signature": "def construct(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_017154", "prompt": "Implement the Python class `MakeLayer2` described below.\n\nClass description:\nMakeLayer2\n\nMethod signatures and docstrings:\n- def __init__(self, block, in_channels, out_channels, stride): init\n- def construct(self, x): construct", "prompted_full_text": "Implement the Python class `MakeLayer2` described below.\n\nClass description:\nMakeLayer2\n\nMethod signatures and docstrings:\n- def __init__(self, block, in_channels, out_channels, stride): init\n- def construct(self, x): construct\n\n<|skeleton|>\nclass MakeLayer2:\n \"\"\"MakeLayer2\"\"\"\n\n def __init__(self, block, in_channels, out_channels, stride):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def construct(self, x):\n \"\"\"construct\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MakeLayer2, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\n self.b = block(out_channels, out_channels, stride=1)\n self.c = block(out_channels, out_channels, stride=1)\n self.d = block(out_channels, out_channels, stride=1)\n self.e = block(out_channels, out_channels, stride=1)\n self.f = block(out_channels, out_channels, stride=1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n x = self.d(x)\n x = self.e(x)\n x = self.f(x)\n return x\n<|end_body_1|>\n", "revision_id": "8be1c70c44913a6f67dd424aa0e0330f82e48b06", "skeleton": "<|skeleton|>\nclass MakeLayer2:\n \"\"\"MakeLayer2\"\"\"\n\n def __init__(self, block, in_channels, out_channels, stride):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def construct(self, x):\n \"\"\"construct\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MakeLayer2:\n \"\"\"MakeLayer2\"\"\"\n\n def __init__(self, block, in_channels, out_channels, stride):\n \"\"\"init\"\"\"\n super(MakeLayer2, self).__init__()\n self.a = ResidualBlockWithDown(in_channels, out_channels, stride=stride, down_sample=True)\n self.b = block(out_channels, out_channels, stride=1)\n self.c = block(out_channels, out_channels, stride=1)\n self.d = block(out_channels, out_channels, stride=1)\n self.e = block(out_channels, out_channels, stride=1)\n self.f = block(out_channels, out_channels, stride=1)\n\n def construct(self, x):\n \"\"\"construct\"\"\"\n x = self.a(x)\n x = self.b(x)\n x = self.c(x)\n x = self.d(x)\n x = self.e(x)\n x = self.f(x)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "back-end/logs/resnet/resnet_data_runner.py", "source_repo": "ZeroWangZY/DL-VIS", "split": "test", "star_events_count": 1} {"blob_id": "1788ca7326b0e64b945734d3c535aa35f5f0b8bb", "bodies": ["self.copy_tasks = copy_tasks\nself.job_id = job_id\nself.job_name = job_name\nself.job_run_id = job_run_id\nself.job_run_start_time_usecs = job_run_start_time_usecs\nself.last_run_end_time_usecs = last_run_end_time_usecs\nself.last_run_start_time_usecs = last_run_start_time_usecs\nself.message = message\nself.num_bytes_read = num_bytes_read\nself.num_logical_bytes_protected = num_logical_bytes_protected\nself.pagination_cookie = pagination_cookie\nself.run_status = run_status\nself.run_type = run_type", "if dictionary is None:\n return None\ncopy_tasks = None\nif dictionary.get('copyTasks') != None:\n copy_tasks = list()\n for structure in dictionary.get('copyTasks'):\n copy_tasks.append(cohesity_management_sdk.models.snapshot_copy_task.SnapshotCopyTask.from_dictionary(structure))\njob_id = dictionary.get('jobId')\njob_name = dictionary.get('jobName')\njob_run_id = dictionary.get('jobRunId')\njob_run_start_time_usecs = dictionary.get('jobRunStartTimeUsecs')\nlast_run_end_time_usecs = dictionary.get('lastRunEndTimeUsecs')\nlast_run_start_time_usecs = dictionary.get('lastRunStartTimeUsecs')\nmessage = dictionary.get('message')\nnum_bytes_read = dictionary.get('numBytesRead')\nnum_logical_bytes_protected = dictionary.get('numLogicalBytesProtected')\npagination_cookie = dictionary.get('paginationCookie')\nrun_status = dictionary.get('runStatus')\nrun_type = dictionary.get('runType')\nreturn cls(copy_tasks, job_id, job_name, job_run_id, job_run_start_time_usecs, last_run_end_time_usecs, last_run_start_time_usecs, message, num_bytes_read, num_logical_bytes_protected, pagination_cookie, run_status, run_type)"], "bodies_text": "<|body_start_0|>\n self.copy_tasks = copy_tasks\n self.job_id = job_id\n self.job_name = job_name\n self.job_run_id = job_run_id\n self.job_run_start_time_usecs = job_run_start_time_usecs\n self.last_run_end_time_usecs = last_run_end_time_usecs\n self.last_run_start_time_usecs = last_run_start_time_usecs\n self.message = message\n self.num_bytes_read = num_bytes_read\n self.num_logical_bytes_protected = num_logical_bytes_protected\n self.pagination_cookie = pagination_cookie\n self.run_status = run_status\n self.run_type = run_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n copy_tasks = None\n if dictionary.get('copyTasks') != None:\n copy_tasks = list()\n for structure in dictionary.get('copyTasks'):\n copy_tasks.append(cohesity_management_sdk.models.snapshot_copy_task.SnapshotCopyTask.from_dictionary(structure))\n job_id = dictionary.get('jobId')\n job_name = dictionary.get('jobName')\n job_run_id = dictionary.get('jobRunId')\n job_run_start_time_usecs = dictionary.get('jobRunStartTimeUsecs')\n last_run_end_time_usecs = dictionary.get('lastRunEndTimeUsecs')\n last_run_start_time_usecs = dictionary.get('lastRunStartTimeUsecs')\n message = dictionary.get('message')\n num_bytes_read = dictionary.get('numBytesRead')\n num_logical_bytes_protected = dictionary.get('numLogicalBytesProtected')\n pagination_cookie = dictionary.get('paginationCookie')\n run_status = dictionary.get('runStatus')\n run_type = dictionary.get('runType')\n return cls(copy_tasks, job_id, job_name, job_run_id, job_run_start_time_usecs, last_run_end_time_usecs, last_run_start_time_usecs, message, num_bytes_read, num_logical_bytes_protected, pagination_cookie, run_status, run_type)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'ProtectionSourceSnapshotInformation' model. Specifies details about a Snapshot that backups up a leaf Protection Source Object. Attributes: copy_tasks (list of SnapshotCopyTask): Array of Snapshot Copy Tasks. Specifies a list of copy tasks (such as replication and archival tasks). job_id (long|int): Specifies the id of the Protection Job. job_name (string): Specifies the name of the Protection Job. job_run_id (long|int): Specifies the id of the Job Run. job_run_start_time_usecs (long|int): Specifies the start time of the Job which this object is part of. The time is specified in Unix epoch Timestamp (in microseconds). last_run_end_time_usecs (long|int): Specifies the e", "class_name": "ProtectionSourceSnapshotInformation", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProtectionSourceSnapshotInformation:\n \"\"\"Implementation of the 'ProtectionSourceSnapshotInformation' model. Specifies details about a Snapshot that backups up a leaf Protection Source Object. Attributes: copy_tasks (list of SnapshotCopyTask): Array of Snapshot Copy Tasks. Specifies a list of copy tasks (such as replication and archival tasks). job_id (long|int): Specifies the id of the Protection Job. job_name (string): Specifies the name of the Protection Job. job_run_id (long|int): Specifies the id of the Job Run. job_run_start_time_usecs (long|int): Specifies the start time of the Job which this object is part of. The time is specified in Unix epoch Timestamp (in microseconds). last_run_end_time_usecs (long|int): Specifies the e\"\"\"\n\n def __init__(self, copy_tasks=None, job_id=None, job_name=None, job_run_id=None, job_run_start_time_usecs=None, last_run_end_time_usecs=None, last_run_start_time_usecs=None, message=None, num_bytes_read=None, num_logical_bytes_protected=None, pagination_cookie=None, run_status=None, run_type=None):\n \"\"\"Constructor for the ProtectionSourceSnapshotInformation class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.copy_tasks = copy_tasks\n self.job_id = job_id\n self.job_name = job_name\n self.job_run_id = job_run_id\n self.job_run_start_time_usecs = job_run_start_time_usecs\n self.last_run_end_time_usecs = last_run_end_time_usecs\n self.last_run_start_time_usecs = last_run_start_time_usecs\n self.message = message\n self.num_bytes_read = num_bytes_read\n self.num_logical_bytes_protected = num_logical_bytes_protected\n self.pagination_cookie = pagination_cookie\n self.run_status = run_status\n self.run_type = run_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n copy_tasks = None\n if dictionary.get('copyTasks') != None:\n copy_tasks = list()\n for structure in dictionary.get('copyTasks'):\n copy_tasks.append(cohesity_management_sdk.models.snapshot_copy_task.SnapshotCopyTask.from_dictionary(structure))\n job_id = dictionary.get('jobId')\n job_name = dictionary.get('jobName')\n job_run_id = dictionary.get('jobRunId')\n job_run_start_time_usecs = dictionary.get('jobRunStartTimeUsecs')\n last_run_end_time_usecs = dictionary.get('lastRunEndTimeUsecs')\n last_run_start_time_usecs = dictionary.get('lastRunStartTimeUsecs')\n message = dictionary.get('message')\n num_bytes_read = dictionary.get('numBytesRead')\n num_logical_bytes_protected = dictionary.get('numLogicalBytesProtected')\n pagination_cookie = dictionary.get('paginationCookie')\n run_status = dictionary.get('runStatus')\n run_type = dictionary.get('runType')\n return cls(copy_tasks, job_id, job_name, job_run_id, job_run_start_time_usecs, last_run_end_time_usecs, last_run_start_time_usecs, message, num_bytes_read, num_logical_bytes_protected, pagination_cookie, run_status, run_type)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000207", "length_bytes": 7225, "license_type": "permissive", "methods": [{"docstring": "Constructor for the ProtectionSourceSnapshotInformation class", "name": "__init__", "signature": "def __init__(self, copy_tasks=None, job_id=None, job_name=None, job_run_id=None, job_run_start_time_usecs=None, last_run_end_time_usecs=None, last_run_start_time_usecs=None, message=None, num_bytes_read=None, num_logical_bytes_protected=None, pagination_cookie=None, run_status=None, run_type=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_051876", "prompt": "Implement the Python class `ProtectionSourceSnapshotInformation` described below.\n\nClass description:\nImplementation of the 'ProtectionSourceSnapshotInformation' model. Specifies details about a Snapshot that backups up a leaf Protection Source Object. Attributes: copy_tasks (list of SnapshotCopyTask): Array of Snapshot Copy Tasks. Specifies a list of copy tasks (such as replication and archival tasks). job_id (long|int): Specifies the id of the Protection Job. job_name (string): Specifies the name of the Protection Job. job_run_id (long|int): Specifies the id of the Job Run. job_run_start_time_usecs (long|int): Specifies the start time of the Job which this object is part of. The time is specified in Unix epoch Timestamp (in microseconds). last_run_end_time_usecs (long|int): Specifies the e\n\nMethod signatures and docstrings:\n- def __init__(self, copy_tasks=None, job_id=None, job_name=None, job_run_id=None, job_run_start_time_usecs=None, last_run_end_time_usecs=None, last_run_start_time_usecs=None, message=None, num_bytes_read=None, num_logical_bytes_protected=None, pagination_cookie=None, run_status=None, run_type=None): Constructor for the ProtectionSourceSnapshotInformation class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `ProtectionSourceSnapshotInformation` described below.\n\nClass description:\nImplementation of the 'ProtectionSourceSnapshotInformation' model. Specifies details about a Snapshot that backups up a leaf Protection Source Object. Attributes: copy_tasks (list of SnapshotCopyTask): Array of Snapshot Copy Tasks. Specifies a list of copy tasks (such as replication and archival tasks). job_id (long|int): Specifies the id of the Protection Job. job_name (string): Specifies the name of the Protection Job. job_run_id (long|int): Specifies the id of the Job Run. job_run_start_time_usecs (long|int): Specifies the start time of the Job which this object is part of. The time is specified in Unix epoch Timestamp (in microseconds). last_run_end_time_usecs (long|int): Specifies the e\n\nMethod signatures and docstrings:\n- def __init__(self, copy_tasks=None, job_id=None, job_name=None, job_run_id=None, job_run_start_time_usecs=None, last_run_end_time_usecs=None, last_run_start_time_usecs=None, message=None, num_bytes_read=None, num_logical_bytes_protected=None, pagination_cookie=None, run_status=None, run_type=None): Constructor for the ProtectionSourceSnapshotInformation class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass ProtectionSourceSnapshotInformation:\n \"\"\"Implementation of the 'ProtectionSourceSnapshotInformation' model. Specifies details about a Snapshot that backups up a leaf Protection Source Object. Attributes: copy_tasks (list of SnapshotCopyTask): Array of Snapshot Copy Tasks. Specifies a list of copy tasks (such as replication and archival tasks). job_id (long|int): Specifies the id of the Protection Job. job_name (string): Specifies the name of the Protection Job. job_run_id (long|int): Specifies the id of the Job Run. job_run_start_time_usecs (long|int): Specifies the start time of the Job which this object is part of. The time is specified in Unix epoch Timestamp (in microseconds). last_run_end_time_usecs (long|int): Specifies the e\"\"\"\n\n def __init__(self, copy_tasks=None, job_id=None, job_name=None, job_run_id=None, job_run_start_time_usecs=None, last_run_end_time_usecs=None, last_run_start_time_usecs=None, message=None, num_bytes_read=None, num_logical_bytes_protected=None, pagination_cookie=None, run_status=None, run_type=None):\n \"\"\"Constructor for the ProtectionSourceSnapshotInformation class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.copy_tasks = copy_tasks\n self.job_id = job_id\n self.job_name = job_name\n self.job_run_id = job_run_id\n self.job_run_start_time_usecs = job_run_start_time_usecs\n self.last_run_end_time_usecs = last_run_end_time_usecs\n self.last_run_start_time_usecs = last_run_start_time_usecs\n self.message = message\n self.num_bytes_read = num_bytes_read\n self.num_logical_bytes_protected = num_logical_bytes_protected\n self.pagination_cookie = pagination_cookie\n self.run_status = run_status\n self.run_type = run_type\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n copy_tasks = None\n if dictionary.get('copyTasks') != None:\n copy_tasks = list()\n for structure in dictionary.get('copyTasks'):\n copy_tasks.append(cohesity_management_sdk.models.snapshot_copy_task.SnapshotCopyTask.from_dictionary(structure))\n job_id = dictionary.get('jobId')\n job_name = dictionary.get('jobName')\n job_run_id = dictionary.get('jobRunId')\n job_run_start_time_usecs = dictionary.get('jobRunStartTimeUsecs')\n last_run_end_time_usecs = dictionary.get('lastRunEndTimeUsecs')\n last_run_start_time_usecs = dictionary.get('lastRunStartTimeUsecs')\n message = dictionary.get('message')\n num_bytes_read = dictionary.get('numBytesRead')\n num_logical_bytes_protected = dictionary.get('numLogicalBytesProtected')\n pagination_cookie = dictionary.get('paginationCookie')\n run_status = dictionary.get('runStatus')\n run_type = dictionary.get('runType')\n return cls(copy_tasks, job_id, job_name, job_run_id, job_run_start_time_usecs, last_run_end_time_usecs, last_run_start_time_usecs, message, num_bytes_read, num_logical_bytes_protected, pagination_cookie, run_status, run_type)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass ProtectionSourceSnapshotInformation:\n \"\"\"Implementation of the 'ProtectionSourceSnapshotInformation' model. Specifies details about a Snapshot that backups up a leaf Protection Source Object. Attributes: copy_tasks (list of SnapshotCopyTask): Array of Snapshot Copy Tasks. Specifies a list of copy tasks (such as replication and archival tasks). job_id (long|int): Specifies the id of the Protection Job. job_name (string): Specifies the name of the Protection Job. job_run_id (long|int): Specifies the id of the Job Run. job_run_start_time_usecs (long|int): Specifies the start time of the Job which this object is part of. The time is specified in Unix epoch Timestamp (in microseconds). last_run_end_time_usecs (long|int): Specifies the e\"\"\"\n\n def __init__(self, copy_tasks=None, job_id=None, job_name=None, job_run_id=None, job_run_start_time_usecs=None, last_run_end_time_usecs=None, last_run_start_time_usecs=None, message=None, num_bytes_read=None, num_logical_bytes_protected=None, pagination_cookie=None, run_status=None, run_type=None):\n \"\"\"Constructor for the ProtectionSourceSnapshotInformation class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProtectionSourceSnapshotInformation:\n \"\"\"Implementation of the 'ProtectionSourceSnapshotInformation' model. Specifies details about a Snapshot that backups up a leaf Protection Source Object. Attributes: copy_tasks (list of SnapshotCopyTask): Array of Snapshot Copy Tasks. Specifies a list of copy tasks (such as replication and archival tasks). job_id (long|int): Specifies the id of the Protection Job. job_name (string): Specifies the name of the Protection Job. job_run_id (long|int): Specifies the id of the Job Run. job_run_start_time_usecs (long|int): Specifies the start time of the Job which this object is part of. The time is specified in Unix epoch Timestamp (in microseconds). last_run_end_time_usecs (long|int): Specifies the e\"\"\"\n\n def __init__(self, copy_tasks=None, job_id=None, job_name=None, job_run_id=None, job_run_start_time_usecs=None, last_run_end_time_usecs=None, last_run_start_time_usecs=None, message=None, num_bytes_read=None, num_logical_bytes_protected=None, pagination_cookie=None, run_status=None, run_type=None):\n \"\"\"Constructor for the ProtectionSourceSnapshotInformation class\"\"\"\n self.copy_tasks = copy_tasks\n self.job_id = job_id\n self.job_name = job_name\n self.job_run_id = job_run_id\n self.job_run_start_time_usecs = job_run_start_time_usecs\n self.last_run_end_time_usecs = last_run_end_time_usecs\n self.last_run_start_time_usecs = last_run_start_time_usecs\n self.message = message\n self.num_bytes_read = num_bytes_read\n self.num_logical_bytes_protected = num_logical_bytes_protected\n self.pagination_cookie = pagination_cookie\n self.run_status = run_status\n self.run_type = run_type\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n copy_tasks = None\n if dictionary.get('copyTasks') != None:\n copy_tasks = list()\n for structure in dictionary.get('copyTasks'):\n copy_tasks.append(cohesity_management_sdk.models.snapshot_copy_task.SnapshotCopyTask.from_dictionary(structure))\n job_id = dictionary.get('jobId')\n job_name = dictionary.get('jobName')\n job_run_id = dictionary.get('jobRunId')\n job_run_start_time_usecs = dictionary.get('jobRunStartTimeUsecs')\n last_run_end_time_usecs = dictionary.get('lastRunEndTimeUsecs')\n last_run_start_time_usecs = dictionary.get('lastRunStartTimeUsecs')\n message = dictionary.get('message')\n num_bytes_read = dictionary.get('numBytesRead')\n num_logical_bytes_protected = dictionary.get('numLogicalBytesProtected')\n pagination_cookie = dictionary.get('paginationCookie')\n run_status = dictionary.get('runStatus')\n run_type = dictionary.get('runType')\n return cls(copy_tasks, job_id, job_name, job_run_id, job_run_start_time_usecs, last_run_end_time_usecs, last_run_start_time_usecs, message, num_bytes_read, num_logical_bytes_protected, pagination_cookie, run_status, run_type)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/protection_source_snapshot_information.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "74740fad7b96eeb46118e5d97bf81abef5df8f6e", "bodies": ["super().__init__(coordinator, device, 'power', 'Energy usage', f'power_{dev_type}')\nself._device = device\nself._type = dev_type\nself._attr_name = f'Energy {dev_type}'\nself._attr_state_class = state_class", "if self.coordinator.data.power_meter is None:\n return None\nreturn getattr(self.coordinator.data.power_meter, f'_{self._type}', None)"], "bodies_text": "<|body_start_0|>\n super().__init__(coordinator, device, 'power', 'Energy usage', f'power_{dev_type}')\n self._device = device\n self._type = dev_type\n self._attr_name = f'Energy {dev_type}'\n self._attr_state_class = state_class\n<|end_body_0|>\n\n<|body_start_1|>\n if self.coordinator.data.power_meter is None:\n return None\n return getattr(self.coordinator.data.power_meter, f'_{self._type}', None)\n<|end_body_1|>\n", "class_docstring": "The Youless low meter value sensor.", "class_name": "EnergyMeterSensor", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EnergyMeterSensor:\n \"\"\"The Youless low meter value sensor.\"\"\"\n\n def __init__(self, coordinator: DataUpdateCoordinator[YoulessAPI], device: str, dev_type: str, state_class: SensorStateClass) -> None:\n \"\"\"Instantiate a energy meter sensor.\"\"\"\n <|body_0|>\n\n def get_sensor(self) -> YoulessSensor | None:\n \"\"\"Get the sensor for providing the value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator, device, 'power', 'Energy usage', f'power_{dev_type}')\n self._device = device\n self._type = dev_type\n self._attr_name = f'Energy {dev_type}'\n self._attr_state_class = state_class\n<|end_body_0|>\n\n<|body_start_1|>\n if self.coordinator.data.power_meter is None:\n return None\n return getattr(self.coordinator.data.power_meter, f'_{self._type}', None)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000208", "length_bytes": 11812, "license_type": "permissive", "methods": [{"docstring": "Instantiate a energy meter sensor.", "name": "__init__", "signature": "def __init__(self, coordinator: DataUpdateCoordinator[YoulessAPI], device: str, dev_type: str, state_class: SensorStateClass) -> None"}, {"docstring": "Get the sensor for providing the value.", "name": "get_sensor", "signature": "def get_sensor(self) -> YoulessSensor | None"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034814", "prompt": "Implement the Python class `EnergyMeterSensor` described below.\n\nClass description:\nThe Youless low meter value sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: DataUpdateCoordinator[YoulessAPI], device: str, dev_type: str, state_class: SensorStateClass) -> None: Instantiate a energy meter sensor.\n- def get_sensor(self) -> YoulessSensor | None: Get the sensor for providing the value.", "prompted_full_text": "Implement the Python class `EnergyMeterSensor` described below.\n\nClass description:\nThe Youless low meter value sensor.\n\nMethod signatures and docstrings:\n- def __init__(self, coordinator: DataUpdateCoordinator[YoulessAPI], device: str, dev_type: str, state_class: SensorStateClass) -> None: Instantiate a energy meter sensor.\n- def get_sensor(self) -> YoulessSensor | None: Get the sensor for providing the value.\n\n<|skeleton|>\nclass EnergyMeterSensor:\n \"\"\"The Youless low meter value sensor.\"\"\"\n\n def __init__(self, coordinator: DataUpdateCoordinator[YoulessAPI], device: str, dev_type: str, state_class: SensorStateClass) -> None:\n \"\"\"Instantiate a energy meter sensor.\"\"\"\n <|body_0|>\n\n def get_sensor(self) -> YoulessSensor | None:\n \"\"\"Get the sensor for providing the value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(coordinator, device, 'power', 'Energy usage', f'power_{dev_type}')\n self._device = device\n self._type = dev_type\n self._attr_name = f'Energy {dev_type}'\n self._attr_state_class = state_class\n<|end_body_0|>\n\n<|body_start_1|>\n if self.coordinator.data.power_meter is None:\n return None\n return getattr(self.coordinator.data.power_meter, f'_{self._type}', None)\n<|end_body_1|>\n", "revision_id": "80caeafcb5b6e2f9da192d0ea6dd1a5b8244b743", "skeleton": "<|skeleton|>\nclass EnergyMeterSensor:\n \"\"\"The Youless low meter value sensor.\"\"\"\n\n def __init__(self, coordinator: DataUpdateCoordinator[YoulessAPI], device: str, dev_type: str, state_class: SensorStateClass) -> None:\n \"\"\"Instantiate a energy meter sensor.\"\"\"\n <|body_0|>\n\n def get_sensor(self) -> YoulessSensor | None:\n \"\"\"Get the sensor for providing the value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EnergyMeterSensor:\n \"\"\"The Youless low meter value sensor.\"\"\"\n\n def __init__(self, coordinator: DataUpdateCoordinator[YoulessAPI], device: str, dev_type: str, state_class: SensorStateClass) -> None:\n \"\"\"Instantiate a energy meter sensor.\"\"\"\n super().__init__(coordinator, device, 'power', 'Energy usage', f'power_{dev_type}')\n self._device = device\n self._type = dev_type\n self._attr_name = f'Energy {dev_type}'\n self._attr_state_class = state_class\n\n def get_sensor(self) -> YoulessSensor | None:\n \"\"\"Get the sensor for providing the value.\"\"\"\n if self.coordinator.data.power_meter is None:\n return None\n return getattr(self.coordinator.data.power_meter, f'_{self._type}', None)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/youless/sensor.py", "source_repo": "home-assistant/core", "split": "test", "star_events_count": 35501} {"blob_id": "00f91c62fd9a5410eb6cb531a9f5c199871001f4", "bodies": ["super(CNN, self).__init__()\ndecreasing = 0\nif not is_increasing:\n decreasing = 1\nassert 1 <= num_layers <= 4\nself.num_layers = num_layers\nmap_conv_layer_to_filter_size = {4: [[3, 5, 5, 7], [7, 5, 5, 3]], 3: [[5, 5, 7], [7, 5, 5]], 2: [[5, 3], [5, 3]], 1: [[7], [7]]}\npool_output_height = int(np.floor(max_len_token / 2.0))\nfor i in range(1, self.num_layers + 1):\n filter_size = map_conv_layer_to_filter_size[self.num_layers][decreasing][i - 1]\n padding_size = math.floor(filter_size / 2)\n prev_filter_count = 1\n if i > 1:\n prev_filter_count = filter_counts[i - 2]\n convlyr = nn.Conv2d(prev_filter_count, filter_counts[i - 1], filter_size, padding=padding_size, stride=1)\n if i == 1:\n self.add_module('cnn_1', convlyr)\n elif i == 2:\n self.add_module('cnn_2', convlyr)\n elif i == 3:\n self.add_module('cnn_3', convlyr)\n elif i == 4:\n self.add_module('cnn_4', convlyr)\nself.align_weights = nn.Parameter(torch.randn(filter_counts[num_layers - 1], pool_output_height, pool_output_height).cuda(), requires_grad=True)\nself.final_dense = nn.Conv2d(filter_counts[num_layers - 1], 1, 1)\nself.relu = nn.ReLU()\nself.pool = nn.MaxPool2d((2, 2), stride=2)", "convd = self.cnn_1(src_tgt_sim.unsqueeze(1))\nif self.num_layers > 1:\n convd = self.relu(convd)\n convd = self.cnn_2(convd)\nif self.num_layers > 2:\n convd = self.relu(convd)\n convd = self.cnn_3(convd)\nif self.num_layers > 3:\n convd = self.relu(convd)\n convd = self.cnn_4(convd)\nconvd_after_pooling = self.pool(convd)\npooled_mask = self.pool(src_tgt_mask)\noutput = self.final_dense(convd_after_pooling).squeeze(1)\nreturn (output * pooled_mask).sum(2).sum(1) / pooled_mask.sum(2).sum(1)"], "bodies_text": "<|body_start_0|>\n super(CNN, self).__init__()\n decreasing = 0\n if not is_increasing:\n decreasing = 1\n assert 1 <= num_layers <= 4\n self.num_layers = num_layers\n map_conv_layer_to_filter_size = {4: [[3, 5, 5, 7], [7, 5, 5, 3]], 3: [[5, 5, 7], [7, 5, 5]], 2: [[5, 3], [5, 3]], 1: [[7], [7]]}\n pool_output_height = int(np.floor(max_len_token / 2.0))\n for i in range(1, self.num_layers + 1):\n filter_size = map_conv_layer_to_filter_size[self.num_layers][decreasing][i - 1]\n padding_size = math.floor(filter_size / 2)\n prev_filter_count = 1\n if i > 1:\n prev_filter_count = filter_counts[i - 2]\n convlyr = nn.Conv2d(prev_filter_count, filter_counts[i - 1], filter_size, padding=padding_size, stride=1)\n if i == 1:\n self.add_module('cnn_1', convlyr)\n elif i == 2:\n self.add_module('cnn_2', convlyr)\n elif i == 3:\n self.add_module('cnn_3', convlyr)\n elif i == 4:\n self.add_module('cnn_4', convlyr)\n self.align_weights = nn.Parameter(torch.randn(filter_counts[num_layers - 1], pool_output_height, pool_output_height).cuda(), requires_grad=True)\n self.final_dense = nn.Conv2d(filter_counts[num_layers - 1], 1, 1)\n self.relu = nn.ReLU()\n self.pool = nn.MaxPool2d((2, 2), stride=2)\n<|end_body_0|>\n\n<|body_start_1|>\n convd = self.cnn_1(src_tgt_sim.unsqueeze(1))\n if self.num_layers > 1:\n convd = self.relu(convd)\n convd = self.cnn_2(convd)\n if self.num_layers > 2:\n convd = self.relu(convd)\n convd = self.cnn_3(convd)\n if self.num_layers > 3:\n convd = self.relu(convd)\n convd = self.cnn_4(convd)\n convd_after_pooling = self.pool(convd)\n pooled_mask = self.pool(src_tgt_mask)\n output = self.final_dense(convd_after_pooling).squeeze(1)\n return (output * pooled_mask).sum(2).sum(1) / pooled_mask.sum(2).sum(1)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "CNN", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CNN:\n\n def __init__(self, is_increasing, num_layers, filter_counts, max_len_token=100):\n \"\"\"params is_increasing: whether the filter size is increasing or decreasing params num_layers: number of layers in the CNN params filter_counts: dictionary of filter index to filter size params max_len_token: maximum number of tokens in sentence\"\"\"\n <|body_0|>\n\n def forward(self, src_tgt_sim, src_tgt_mask):\n \"\"\"Run CNN over input. :params src_tgt_sim: tensor representing similarity between source and target :return: scores for similarity\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CNN, self).__init__()\n decreasing = 0\n if not is_increasing:\n decreasing = 1\n assert 1 <= num_layers <= 4\n self.num_layers = num_layers\n map_conv_layer_to_filter_size = {4: [[3, 5, 5, 7], [7, 5, 5, 3]], 3: [[5, 5, 7], [7, 5, 5]], 2: [[5, 3], [5, 3]], 1: [[7], [7]]}\n pool_output_height = int(np.floor(max_len_token / 2.0))\n for i in range(1, self.num_layers + 1):\n filter_size = map_conv_layer_to_filter_size[self.num_layers][decreasing][i - 1]\n padding_size = math.floor(filter_size / 2)\n prev_filter_count = 1\n if i > 1:\n prev_filter_count = filter_counts[i - 2]\n convlyr = nn.Conv2d(prev_filter_count, filter_counts[i - 1], filter_size, padding=padding_size, stride=1)\n if i == 1:\n self.add_module('cnn_1', convlyr)\n elif i == 2:\n self.add_module('cnn_2', convlyr)\n elif i == 3:\n self.add_module('cnn_3', convlyr)\n elif i == 4:\n self.add_module('cnn_4', convlyr)\n self.align_weights = nn.Parameter(torch.randn(filter_counts[num_layers - 1], pool_output_height, pool_output_height).cuda(), requires_grad=True)\n self.final_dense = nn.Conv2d(filter_counts[num_layers - 1], 1, 1)\n self.relu = nn.ReLU()\n self.pool = nn.MaxPool2d((2, 2), stride=2)\n<|end_body_0|>\n\n<|body_start_1|>\n convd = self.cnn_1(src_tgt_sim.unsqueeze(1))\n if self.num_layers > 1:\n convd = self.relu(convd)\n convd = self.cnn_2(convd)\n if self.num_layers > 2:\n convd = self.relu(convd)\n convd = self.cnn_3(convd)\n if self.num_layers > 3:\n convd = self.relu(convd)\n convd = self.cnn_4(convd)\n convd_after_pooling = self.pool(convd)\n pooled_mask = self.pool(src_tgt_mask)\n output = self.final_dense(convd_after_pooling).squeeze(1)\n return (output * pooled_mask).sum(2).sum(1) / pooled_mask.sum(2).sum(1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000209", "length_bytes": 8806, "license_type": "permissive", "methods": [{"docstring": "params is_increasing: whether the filter size is increasing or decreasing params num_layers: number of layers in the CNN params filter_counts: dictionary of filter index to filter size params max_len_token: maximum number of tokens in sentence", "name": "__init__", "signature": "def __init__(self, is_increasing, num_layers, filter_counts, max_len_token=100)"}, {"docstring": "Run CNN over input. :params src_tgt_sim: tensor representing similarity between source and target :return: scores for similarity", "name": "forward", "signature": "def forward(self, src_tgt_sim, src_tgt_mask)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031285", "prompt": "Implement the Python class `CNN` described below.\n\nClass description:\nImplement the CNN class.\n\nMethod signatures and docstrings:\n- def __init__(self, is_increasing, num_layers, filter_counts, max_len_token=100): params is_increasing: whether the filter size is increasing or decreasing params num_layers: number of layers in the CNN params filter_counts: dictionary of filter index to filter size params max_len_token: maximum number of tokens in sentence\n- def forward(self, src_tgt_sim, src_tgt_mask): Run CNN over input. :params src_tgt_sim: tensor representing similarity between source and target :return: scores for similarity", "prompted_full_text": "Implement the Python class `CNN` described below.\n\nClass description:\nImplement the CNN class.\n\nMethod signatures and docstrings:\n- def __init__(self, is_increasing, num_layers, filter_counts, max_len_token=100): params is_increasing: whether the filter size is increasing or decreasing params num_layers: number of layers in the CNN params filter_counts: dictionary of filter index to filter size params max_len_token: maximum number of tokens in sentence\n- def forward(self, src_tgt_sim, src_tgt_mask): Run CNN over input. :params src_tgt_sim: tensor representing similarity between source and target :return: scores for similarity\n\n<|skeleton|>\nclass CNN:\n\n def __init__(self, is_increasing, num_layers, filter_counts, max_len_token=100):\n \"\"\"params is_increasing: whether the filter size is increasing or decreasing params num_layers: number of layers in the CNN params filter_counts: dictionary of filter index to filter size params max_len_token: maximum number of tokens in sentence\"\"\"\n <|body_0|>\n\n def forward(self, src_tgt_sim, src_tgt_mask):\n \"\"\"Run CNN over input. :params src_tgt_sim: tensor representing similarity between source and target :return: scores for similarity\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CNN, self).__init__()\n decreasing = 0\n if not is_increasing:\n decreasing = 1\n assert 1 <= num_layers <= 4\n self.num_layers = num_layers\n map_conv_layer_to_filter_size = {4: [[3, 5, 5, 7], [7, 5, 5, 3]], 3: [[5, 5, 7], [7, 5, 5]], 2: [[5, 3], [5, 3]], 1: [[7], [7]]}\n pool_output_height = int(np.floor(max_len_token / 2.0))\n for i in range(1, self.num_layers + 1):\n filter_size = map_conv_layer_to_filter_size[self.num_layers][decreasing][i - 1]\n padding_size = math.floor(filter_size / 2)\n prev_filter_count = 1\n if i > 1:\n prev_filter_count = filter_counts[i - 2]\n convlyr = nn.Conv2d(prev_filter_count, filter_counts[i - 1], filter_size, padding=padding_size, stride=1)\n if i == 1:\n self.add_module('cnn_1', convlyr)\n elif i == 2:\n self.add_module('cnn_2', convlyr)\n elif i == 3:\n self.add_module('cnn_3', convlyr)\n elif i == 4:\n self.add_module('cnn_4', convlyr)\n self.align_weights = nn.Parameter(torch.randn(filter_counts[num_layers - 1], pool_output_height, pool_output_height).cuda(), requires_grad=True)\n self.final_dense = nn.Conv2d(filter_counts[num_layers - 1], 1, 1)\n self.relu = nn.ReLU()\n self.pool = nn.MaxPool2d((2, 2), stride=2)\n<|end_body_0|>\n\n<|body_start_1|>\n convd = self.cnn_1(src_tgt_sim.unsqueeze(1))\n if self.num_layers > 1:\n convd = self.relu(convd)\n convd = self.cnn_2(convd)\n if self.num_layers > 2:\n convd = self.relu(convd)\n convd = self.cnn_3(convd)\n if self.num_layers > 3:\n convd = self.relu(convd)\n convd = self.cnn_4(convd)\n convd_after_pooling = self.pool(convd)\n pooled_mask = self.pool(src_tgt_mask)\n output = self.final_dense(convd_after_pooling).squeeze(1)\n return (output * pooled_mask).sum(2).sum(1) / pooled_mask.sum(2).sum(1)\n<|end_body_1|>\n", "revision_id": "5dca6fa477c6fdb93b042deb1b0212bb91ce7f00", "skeleton": "<|skeleton|>\nclass CNN:\n\n def __init__(self, is_increasing, num_layers, filter_counts, max_len_token=100):\n \"\"\"params is_increasing: whether the filter size is increasing or decreasing params num_layers: number of layers in the CNN params filter_counts: dictionary of filter index to filter size params max_len_token: maximum number of tokens in sentence\"\"\"\n <|body_0|>\n\n def forward(self, src_tgt_sim, src_tgt_mask):\n \"\"\"Run CNN over input. :params src_tgt_sim: tensor representing similarity between source and target :return: scores for similarity\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CNN:\n def __init__(self, is_increasing, num_layers, filter_counts, max_len_token=100):\n \"\"\"params is_increasing: whether the filter size is increasing or decreasing params num_layers: number of layers in the CNN params filter_counts: dictionary of filter index to filter size params max_len_token: maximum number of tokens in sentence\"\"\"\n super(CNN, self).__init__()\n decreasing = 0\n if not is_increasing:\n decreasing = 1\n assert 1 <= num_layers <= 4\n self.num_layers = num_layers\n map_conv_layer_to_filter_size = {4: [[3, 5, 5, 7], [7, 5, 5, 3]], 3: [[5, 5, 7], [7, 5, 5]], 2: [[5, 3], [5, 3]], 1: [[7], [7]]}\n pool_output_height = int(np.floor(max_len_token / 2.0))\n for i in range(1, self.num_layers + 1):\n filter_size = map_conv_layer_to_filter_size[self.num_layers][decreasing][i - 1]\n padding_size = math.floor(filter_size / 2)\n prev_filter_count = 1\n if i > 1:\n prev_filter_count = filter_counts[i - 2]\n convlyr = nn.Conv2d(prev_filter_count, filter_counts[i - 1], filter_size, padding=padding_size, stride=1)\n if i == 1:\n self.add_module('cnn_1', convlyr)\n elif i == 2:\n self.add_module('cnn_2', convlyr)\n elif i == 3:\n self.add_module('cnn_3', convlyr)\n elif i == 4:\n self.add_module('cnn_4', convlyr)\n self.align_weights = nn.Parameter(torch.randn(filter_counts[num_layers - 1], pool_output_height, pool_output_height).cuda(), requires_grad=True)\n self.final_dense = nn.Conv2d(filter_counts[num_layers - 1], 1, 1)\n self.relu = nn.ReLU()\n self.pool = nn.MaxPool2d((2, 2), stride=2)\n\n def forward(self, src_tgt_sim, src_tgt_mask):\n \"\"\"Run CNN over input. :params src_tgt_sim: tensor representing similarity between source and target :return: scores for similarity\"\"\"\n convd = self.cnn_1(src_tgt_sim.unsqueeze(1))\n if self.num_layers > 1:\n convd = self.relu(convd)\n convd = self.cnn_2(convd)\n if self.num_layers > 2:\n convd = self.relu(convd)\n convd = self.cnn_3(convd)\n if self.num_layers > 3:\n convd = self.relu(convd)\n convd = self.cnn_4(convd)\n convd_after_pooling = self.pool(convd)\n pooled_mask = self.pool(src_tgt_mask)\n output = self.final_dense(convd_after_pooling).squeeze(1)\n return (output * pooled_mask).sum(2).sum(1) / pooled_mask.sum(2).sum(1)\n", "source": "the_stack_v2_python_sparse", "source_path": "stance.py", "source_repo": "jlibovicky/neural-string-edit-distance", "split": "test", "star_events_count": 2} {"blob_id": "4284e77452d4858e9b5a1156425cef139289a398", "bodies": ["if len(query) == 0:\n return ('', ['$'])\nprefix, jsonpath_exp = Parser.parse(query)\nif jsonpath_exp is None:\n return ('', [])\ncurrent_data = [match.value for match in jsonpath_exp.find(data)]\noptions = JSONPathAutoComplete._list_options(current_data, prefix)\nreturn (prefix, options)", "options = []\nif len(data) > 1:\n options = JSONPathAutoComplete._generate_list_completes(len(data), prefix)\nelse:\n cur = data[0]\n if isinstance(cur, list):\n options = JSONPathAutoComplete._generate_list_completes(len(cur), prefix)\n elif isinstance(cur, dict):\n for key in cur.keys():\n if re.match('.*[\\\\.\\\\[\\\\]].*', key) or prefix.startswith('['):\n options.append(\"['\" + key + \"']\")\n else:\n options.append(key)\n options = list(filter(lambda k: k.startswith(prefix), options))\nreturn options", "options = ['[*]']\noptions.extend(['[' + str(index) + ']' for index in range(length)])\nreturn list(filter(lambda k: k.startswith(prefix), options))"], "bodies_text": "<|body_start_0|>\n if len(query) == 0:\n return ('', ['$'])\n prefix, jsonpath_exp = Parser.parse(query)\n if jsonpath_exp is None:\n return ('', [])\n current_data = [match.value for match in jsonpath_exp.find(data)]\n options = JSONPathAutoComplete._list_options(current_data, prefix)\n return (prefix, options)\n<|end_body_0|>\n\n<|body_start_1|>\n options = []\n if len(data) > 1:\n options = JSONPathAutoComplete._generate_list_completes(len(data), prefix)\n else:\n cur = data[0]\n if isinstance(cur, list):\n options = JSONPathAutoComplete._generate_list_completes(len(cur), prefix)\n elif isinstance(cur, dict):\n for key in cur.keys():\n if re.match('.*[\\\\.\\\\[\\\\]].*', key) or prefix.startswith('['):\n options.append(\"['\" + key + \"']\")\n else:\n options.append(key)\n options = list(filter(lambda k: k.startswith(prefix), options))\n return options\n<|end_body_1|>\n\n<|body_start_2|>\n options = ['[*]']\n options.extend(['[' + str(index) + ']' for index in range(length)])\n return list(filter(lambda k: k.startswith(prefix), options))\n<|end_body_2|>\n", "class_docstring": "Utility library that performs auto-completion on the given potentially incomplete query. :py:meth:`complete` is the single entry point of this class which returns Tuple.of(the incomplete prefix, potential completes)", "class_name": "JSONPathAutoComplete", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JSONPathAutoComplete:\n \"\"\"Utility library that performs auto-completion on the given potentially incomplete query. :py:meth:`complete` is the single entry point of this class which returns Tuple.of(the incomplete prefix, potential completes)\"\"\"\n\n def complete(data, query):\n \"\"\"Complete the given query with potential options in the given data. :param data: the data :type data: dict, list, int, float, str, bool, None :param query: the query :type query: str :return: (option_prefix, options)\"\"\"\n <|body_0|>\n\n def _list_options(data, prefix):\n \"\"\"List all possible next values with the query result from last valid JSONPath query with given prefix. :param data: the current query result :type data: dict, list, int, float, str, bool, None :return: potential options, return empty if `data` is not a composite JSON node\"\"\"\n <|body_1|>\n\n def _generate_list_completes(length, prefix):\n \"\"\"Generates completes for an array, use its length also add `*` to represent everything. :param length: the list length :type length: int :return: completes for current length.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(query) == 0:\n return ('', ['$'])\n prefix, jsonpath_exp = Parser.parse(query)\n if jsonpath_exp is None:\n return ('', [])\n current_data = [match.value for match in jsonpath_exp.find(data)]\n options = JSONPathAutoComplete._list_options(current_data, prefix)\n return (prefix, options)\n<|end_body_0|>\n\n<|body_start_1|>\n options = []\n if len(data) > 1:\n options = JSONPathAutoComplete._generate_list_completes(len(data), prefix)\n else:\n cur = data[0]\n if isinstance(cur, list):\n options = JSONPathAutoComplete._generate_list_completes(len(cur), prefix)\n elif isinstance(cur, dict):\n for key in cur.keys():\n if re.match('.*[\\\\.\\\\[\\\\]].*', key) or prefix.startswith('['):\n options.append(\"['\" + key + \"']\")\n else:\n options.append(key)\n options = list(filter(lambda k: k.startswith(prefix), options))\n return options\n<|end_body_1|>\n\n<|body_start_2|>\n options = ['[*]']\n options.extend(['[' + str(index) + ']' for index in range(length)])\n return list(filter(lambda k: k.startswith(prefix), options))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000210", "length_bytes": 2942, "license_type": "permissive", "methods": [{"docstring": "Complete the given query with potential options in the given data. :param data: the data :type data: dict, list, int, float, str, bool, None :param query: the query :type query: str :return: (option_prefix, options)", "name": "complete", "signature": "def complete(data, query)"}, {"docstring": "List all possible next values with the query result from last valid JSONPath query with given prefix. :param data: the current query result :type data: dict, list, int, float, str, bool, None :return: potential options, return empty if `data` is not a composite JSON node", "name": "_list_options", "signature": "def _list_options(data, prefix)"}, {"docstring": "Generates completes for an array, use its length also add `*` to represent everything. :param length: the list length :type length: int :return: completes for current length.", "name": "_generate_list_completes", "signature": "def _generate_list_completes(length, prefix)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_041180", "prompt": "Implement the Python class `JSONPathAutoComplete` described below.\n\nClass description:\nUtility library that performs auto-completion on the given potentially incomplete query. :py:meth:`complete` is the single entry point of this class which returns Tuple.of(the incomplete prefix, potential completes)\n\nMethod signatures and docstrings:\n- def complete(data, query): Complete the given query with potential options in the given data. :param data: the data :type data: dict, list, int, float, str, bool, None :param query: the query :type query: str :return: (option_prefix, options)\n- def _list_options(data, prefix): List all possible next values with the query result from last valid JSONPath query with given prefix. :param data: the current query result :type data: dict, list, int, float, str, bool, None :return: potential options, return empty if `data` is not a composite JSON node\n- def _generate_list_completes(length, prefix): Generates completes for an array, use its length also add `*` to represent everything. :param length: the list length :type length: int :return: completes for current length.", "prompted_full_text": "Implement the Python class `JSONPathAutoComplete` described below.\n\nClass description:\nUtility library that performs auto-completion on the given potentially incomplete query. :py:meth:`complete` is the single entry point of this class which returns Tuple.of(the incomplete prefix, potential completes)\n\nMethod signatures and docstrings:\n- def complete(data, query): Complete the given query with potential options in the given data. :param data: the data :type data: dict, list, int, float, str, bool, None :param query: the query :type query: str :return: (option_prefix, options)\n- def _list_options(data, prefix): List all possible next values with the query result from last valid JSONPath query with given prefix. :param data: the current query result :type data: dict, list, int, float, str, bool, None :return: potential options, return empty if `data` is not a composite JSON node\n- def _generate_list_completes(length, prefix): Generates completes for an array, use its length also add `*` to represent everything. :param length: the list length :type length: int :return: completes for current length.\n\n<|skeleton|>\nclass JSONPathAutoComplete:\n \"\"\"Utility library that performs auto-completion on the given potentially incomplete query. :py:meth:`complete` is the single entry point of this class which returns Tuple.of(the incomplete prefix, potential completes)\"\"\"\n\n def complete(data, query):\n \"\"\"Complete the given query with potential options in the given data. :param data: the data :type data: dict, list, int, float, str, bool, None :param query: the query :type query: str :return: (option_prefix, options)\"\"\"\n <|body_0|>\n\n def _list_options(data, prefix):\n \"\"\"List all possible next values with the query result from last valid JSONPath query with given prefix. :param data: the current query result :type data: dict, list, int, float, str, bool, None :return: potential options, return empty if `data` is not a composite JSON node\"\"\"\n <|body_1|>\n\n def _generate_list_completes(length, prefix):\n \"\"\"Generates completes for an array, use its length also add `*` to represent everything. :param length: the list length :type length: int :return: completes for current length.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(query) == 0:\n return ('', ['$'])\n prefix, jsonpath_exp = Parser.parse(query)\n if jsonpath_exp is None:\n return ('', [])\n current_data = [match.value for match in jsonpath_exp.find(data)]\n options = JSONPathAutoComplete._list_options(current_data, prefix)\n return (prefix, options)\n<|end_body_0|>\n\n<|body_start_1|>\n options = []\n if len(data) > 1:\n options = JSONPathAutoComplete._generate_list_completes(len(data), prefix)\n else:\n cur = data[0]\n if isinstance(cur, list):\n options = JSONPathAutoComplete._generate_list_completes(len(cur), prefix)\n elif isinstance(cur, dict):\n for key in cur.keys():\n if re.match('.*[\\\\.\\\\[\\\\]].*', key) or prefix.startswith('['):\n options.append(\"['\" + key + \"']\")\n else:\n options.append(key)\n options = list(filter(lambda k: k.startswith(prefix), options))\n return options\n<|end_body_1|>\n\n<|body_start_2|>\n options = ['[*]']\n options.extend(['[' + str(index) + ']' for index in range(length)])\n return list(filter(lambda k: k.startswith(prefix), options))\n<|end_body_2|>\n", "revision_id": "728af55b31951db7e14535f200816354a64a3267", "skeleton": "<|skeleton|>\nclass JSONPathAutoComplete:\n \"\"\"Utility library that performs auto-completion on the given potentially incomplete query. :py:meth:`complete` is the single entry point of this class which returns Tuple.of(the incomplete prefix, potential completes)\"\"\"\n\n def complete(data, query):\n \"\"\"Complete the given query with potential options in the given data. :param data: the data :type data: dict, list, int, float, str, bool, None :param query: the query :type query: str :return: (option_prefix, options)\"\"\"\n <|body_0|>\n\n def _list_options(data, prefix):\n \"\"\"List all possible next values with the query result from last valid JSONPath query with given prefix. :param data: the current query result :type data: dict, list, int, float, str, bool, None :return: potential options, return empty if `data` is not a composite JSON node\"\"\"\n <|body_1|>\n\n def _generate_list_completes(length, prefix):\n \"\"\"Generates completes for an array, use its length also add `*` to represent everything. :param length: the list length :type length: int :return: completes for current length.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class JSONPathAutoComplete:\n \"\"\"Utility library that performs auto-completion on the given potentially incomplete query. :py:meth:`complete` is the single entry point of this class which returns Tuple.of(the incomplete prefix, potential completes)\"\"\"\n\n def complete(data, query):\n \"\"\"Complete the given query with potential options in the given data. :param data: the data :type data: dict, list, int, float, str, bool, None :param query: the query :type query: str :return: (option_prefix, options)\"\"\"\n if len(query) == 0:\n return ('', ['$'])\n prefix, jsonpath_exp = Parser.parse(query)\n if jsonpath_exp is None:\n return ('', [])\n current_data = [match.value for match in jsonpath_exp.find(data)]\n options = JSONPathAutoComplete._list_options(current_data, prefix)\n return (prefix, options)\n\n def _list_options(data, prefix):\n \"\"\"List all possible next values with the query result from last valid JSONPath query with given prefix. :param data: the current query result :type data: dict, list, int, float, str, bool, None :return: potential options, return empty if `data` is not a composite JSON node\"\"\"\n options = []\n if len(data) > 1:\n options = JSONPathAutoComplete._generate_list_completes(len(data), prefix)\n else:\n cur = data[0]\n if isinstance(cur, list):\n options = JSONPathAutoComplete._generate_list_completes(len(cur), prefix)\n elif isinstance(cur, dict):\n for key in cur.keys():\n if re.match('.*[\\\\.\\\\[\\\\]].*', key) or prefix.startswith('['):\n options.append(\"['\" + key + \"']\")\n else:\n options.append(key)\n options = list(filter(lambda k: k.startswith(prefix), options))\n return options\n\n def _generate_list_completes(length, prefix):\n \"\"\"Generates completes for an array, use its length also add `*` to represent everything. :param length: the list length :type length: int :return: completes for current length.\"\"\"\n options = ['[*]']\n options.extend(['[' + str(index) + ']' for index in range(length)])\n return list(filter(lambda k: k.startswith(prefix), options))\n", "source": "the_stack_v2_python_sparse", "source_path": "src/pyfx/model/autocomplete/autocomplete.py", "source_repo": "swipswaps/pyfx", "split": "test", "star_events_count": 0} {"blob_id": "c3a83b2ab52c1dd286afdd619b85fb5a1e297690", "bodies": ["if not board:\n return False\nfor i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word):\n return True\nreturn False", "if len(word) == 0:\n return True\nif i < 0 or i >= len(board) or j < 0 or (j >= len(board[0])) or (board[i][j] != word[0]):\n return False\ntmp = board[i][j]\nboard[i][j] = '#'\nres = self.dfs(board, i + 1, j, word[1:]) or self.dfs(board, i - 1, j, word[1:]) or self.dfs(board, i, j + 1, word[1:]) or self.dfs(board, i, j - 1, word[1:])\nboard[i][j] = tmp\nreturn res"], "bodies_text": "<|body_start_0|>\n if not board:\n return False\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if len(word) == 0:\n return True\n if i < 0 or i >= len(board) or j < 0 or (j >= len(board[0])) or (board[i][j] != word[0]):\n return False\n tmp = board[i][j]\n board[i][j] = '#'\n res = self.dfs(board, i + 1, j, word[1:]) or self.dfs(board, i - 1, j, word[1:]) or self.dfs(board, i, j + 1, word[1:]) or self.dfs(board, i, j - 1, word[1:])\n board[i][j] = tmp\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n <|body_0|>\n\n def dfs(self, board, i, j, word):\n \"\"\":type board: List[List[str]] :type i: next loc row :type i: next loc col :type word: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not board:\n return False\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if len(word) == 0:\n return True\n if i < 0 or i >= len(board) or j < 0 or (j >= len(board[0])) or (board[i][j] != word[0]):\n return False\n tmp = board[i][j]\n board[i][j] = '#'\n res = self.dfs(board, i + 1, j, word[1:]) or self.dfs(board, i - 1, j, word[1:]) or self.dfs(board, i, j + 1, word[1:]) or self.dfs(board, i, j - 1, word[1:])\n board[i][j] = tmp\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000211", "length_bytes": 1874, "license_type": "no_license", "methods": [{"docstring": ":type board: List[List[str]] :type word: str :rtype: bool", "name": "exist", "signature": "def exist(self, board, word)"}, {"docstring": ":type board: List[List[str]] :type i: next loc row :type i: next loc col :type word: str :rtype: bool", "name": "dfs", "signature": "def dfs(self, board, i, j, word)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_026772", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def exist(self, board, word): :type board: List[List[str]] :type word: str :rtype: bool\n- def dfs(self, board, i, j, word): :type board: List[List[str]] :type i: next loc row :type i: next loc col :type word: str :rtype: bool", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def exist(self, board, word): :type board: List[List[str]] :type word: str :rtype: bool\n- def dfs(self, board, i, j, word): :type board: List[List[str]] :type i: next loc row :type i: next loc col :type word: str :rtype: bool\n\n<|skeleton|>\nclass Solution:\n\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n <|body_0|>\n\n def dfs(self, board, i, j, word):\n \"\"\":type board: List[List[str]] :type i: next loc row :type i: next loc col :type word: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not board:\n return False\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word):\n return True\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n if len(word) == 0:\n return True\n if i < 0 or i >= len(board) or j < 0 or (j >= len(board[0])) or (board[i][j] != word[0]):\n return False\n tmp = board[i][j]\n board[i][j] = '#'\n res = self.dfs(board, i + 1, j, word[1:]) or self.dfs(board, i - 1, j, word[1:]) or self.dfs(board, i, j + 1, word[1:]) or self.dfs(board, i, j - 1, word[1:])\n board[i][j] = tmp\n return res\n<|end_body_1|>\n", "revision_id": "b77130a0133cd40990c4d7096db5e388de67cbf2", "skeleton": "<|skeleton|>\nclass Solution:\n\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n <|body_0|>\n\n def dfs(self, board, i, j, word):\n \"\"\":type board: List[List[str]] :type i: next loc row :type i: next loc col :type word: str :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def exist(self, board, word):\n \"\"\":type board: List[List[str]] :type word: str :rtype: bool\"\"\"\n if not board:\n return False\n for i in range(len(board)):\n for j in range(len(board[0])):\n if self.dfs(board, i, j, word):\n return True\n return False\n\n def dfs(self, board, i, j, word):\n \"\"\":type board: List[List[str]] :type i: next loc row :type i: next loc col :type word: str :rtype: bool\"\"\"\n if len(word) == 0:\n return True\n if i < 0 or i >= len(board) or j < 0 or (j >= len(board[0])) or (board[i][j] != word[0]):\n return False\n tmp = board[i][j]\n board[i][j] = '#'\n res = self.dfs(board, i + 1, j, word[1:]) or self.dfs(board, i - 1, j, word[1:]) or self.dfs(board, i, j + 1, word[1:]) or self.dfs(board, i, j - 1, word[1:])\n board[i][j] = tmp\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "79.WordSearch.py", "source_repo": "flavorfan/MyLeetCode", "split": "test", "star_events_count": 0} {"blob_id": "02074211e681114fcd60ba233a38de35a666b054", "bodies": ["if amount == 0:\n return 0\nvisited = [False] * (amount + 1)\nnum = 0\ncur_sum = [0]\nnext_sum = []\nwhile cur_sum:\n num += 1\n for val in cur_sum:\n for coin in coins:\n new_val = val + coin\n if new_val == amount:\n return num\n if new_val < amount and (not visited[new_val]):\n next_sum.append(new_val)\n visited[new_val] = True\n cur_sum, next_sum = (next_sum, [])\nreturn -1", "coin_count = [0] + [float('inf')] * amount\nfor coin in coins:\n for idx, val in enumerate(coin_count):\n if coin <= idx:\n coin_count[idx] = min(val, 1 + coin_count[idx - coin])\nif coin_count[-1] == float('inf'):\n return -1\nelse:\n return coin_count[-1]"], "bodies_text": "<|body_start_0|>\n if amount == 0:\n return 0\n visited = [False] * (amount + 1)\n num = 0\n cur_sum = [0]\n next_sum = []\n while cur_sum:\n num += 1\n for val in cur_sum:\n for coin in coins:\n new_val = val + coin\n if new_val == amount:\n return num\n if new_val < amount and (not visited[new_val]):\n next_sum.append(new_val)\n visited[new_val] = True\n cur_sum, next_sum = (next_sum, [])\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n coin_count = [0] + [float('inf')] * amount\n for coin in coins:\n for idx, val in enumerate(coin_count):\n if coin <= idx:\n coin_count[idx] = min(val, 1 + coin_count[idx - coin])\n if coin_count[-1] == float('inf'):\n return -1\n else:\n return coin_count[-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def coinChange_bfs(self, coins, amount):\n \"\"\"Breadth first search :type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_0|>\n\n def coinChange_dp(self, coins, amount):\n \"\"\"Dynamic programming :type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if amount == 0:\n return 0\n visited = [False] * (amount + 1)\n num = 0\n cur_sum = [0]\n next_sum = []\n while cur_sum:\n num += 1\n for val in cur_sum:\n for coin in coins:\n new_val = val + coin\n if new_val == amount:\n return num\n if new_val < amount and (not visited[new_val]):\n next_sum.append(new_val)\n visited[new_val] = True\n cur_sum, next_sum = (next_sum, [])\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n coin_count = [0] + [float('inf')] * amount\n for coin in coins:\n for idx, val in enumerate(coin_count):\n if coin <= idx:\n coin_count[idx] = min(val, 1 + coin_count[idx - coin])\n if coin_count[-1] == float('inf'):\n return -1\n else:\n return coin_count[-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000212", "length_bytes": 1353, "license_type": "no_license", "methods": [{"docstring": "Breadth first search :type coins: List[int] :type amount: int :rtype: int", "name": "coinChange_bfs", "signature": "def coinChange_bfs(self, coins, amount)"}, {"docstring": "Dynamic programming :type coins: List[int] :type amount: int :rtype: int", "name": "coinChange_dp", "signature": "def coinChange_dp(self, coins, amount)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034179", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def coinChange_bfs(self, coins, amount): Breadth first search :type coins: List[int] :type amount: int :rtype: int\n- def coinChange_dp(self, coins, amount): Dynamic programming :type coins: List[int] :type amount: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def coinChange_bfs(self, coins, amount): Breadth first search :type coins: List[int] :type amount: int :rtype: int\n- def coinChange_dp(self, coins, amount): Dynamic programming :type coins: List[int] :type amount: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def coinChange_bfs(self, coins, amount):\n \"\"\"Breadth first search :type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_0|>\n\n def coinChange_dp(self, coins, amount):\n \"\"\"Dynamic programming :type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if amount == 0:\n return 0\n visited = [False] * (amount + 1)\n num = 0\n cur_sum = [0]\n next_sum = []\n while cur_sum:\n num += 1\n for val in cur_sum:\n for coin in coins:\n new_val = val + coin\n if new_val == amount:\n return num\n if new_val < amount and (not visited[new_val]):\n next_sum.append(new_val)\n visited[new_val] = True\n cur_sum, next_sum = (next_sum, [])\n return -1\n<|end_body_0|>\n\n<|body_start_1|>\n coin_count = [0] + [float('inf')] * amount\n for coin in coins:\n for idx, val in enumerate(coin_count):\n if coin <= idx:\n coin_count[idx] = min(val, 1 + coin_count[idx - coin])\n if coin_count[-1] == float('inf'):\n return -1\n else:\n return coin_count[-1]\n<|end_body_1|>\n", "revision_id": "041a369335d20e260582c780c311af0c5c20f1dc", "skeleton": "<|skeleton|>\nclass Solution:\n\n def coinChange_bfs(self, coins, amount):\n \"\"\"Breadth first search :type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_0|>\n\n def coinChange_dp(self, coins, amount):\n \"\"\"Dynamic programming :type coins: List[int] :type amount: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def coinChange_bfs(self, coins, amount):\n \"\"\"Breadth first search :type coins: List[int] :type amount: int :rtype: int\"\"\"\n if amount == 0:\n return 0\n visited = [False] * (amount + 1)\n num = 0\n cur_sum = [0]\n next_sum = []\n while cur_sum:\n num += 1\n for val in cur_sum:\n for coin in coins:\n new_val = val + coin\n if new_val == amount:\n return num\n if new_val < amount and (not visited[new_val]):\n next_sum.append(new_val)\n visited[new_val] = True\n cur_sum, next_sum = (next_sum, [])\n return -1\n\n def coinChange_dp(self, coins, amount):\n \"\"\"Dynamic programming :type coins: List[int] :type amount: int :rtype: int\"\"\"\n coin_count = [0] + [float('inf')] * amount\n for coin in coins:\n for idx, val in enumerate(coin_count):\n if coin <= idx:\n coin_count[idx] = min(val, 1 + coin_count[idx - coin])\n if coin_count[-1] == float('inf'):\n return -1\n else:\n return coin_count[-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "solutions/coin_change.py", "source_repo": "danong/leetcode-solutions", "split": "test", "star_events_count": 0} {"blob_id": "207525b47ca8be0771d01eaba8a96e7127ad115b", "bodies": ["try:\n user = User.objects.get(pk=data)\nexcept User.DoesNotExist:\n raise serializers.ValidationError('Invalid passenger.')\npool = self.context['pool']\ntry:\n membership = Membership.objects.get(user=user, pool=pool, is_active=True)\nexcept Membership.DoesNotExist:\n raise serializers.ValidationError('User is not an active member of the poo.')\nself.context['user'] = user\nself.context['member'] = membership\nreturn data", "trip = self.context['trip']\nif trip.departure_date <= timezone.now():\n raise serializers.ValidationError(\"You can't join this trip now\")\nif trip.available_seats < 1:\n raise serializers.ValidationError('Trip is already full!')\nif trip.passengers.filter(pk=self.context['user'].pk).exists():\n raise serializers.ValidationError('Passenger is already in this trip')\nreturn data", "trip = self.context['trip']\nuser = self.context['user']\ntrip.passengers.add(user)\ntrip.available_seats -= 1\nprofile = user.profile\nprofile.trips_taken += 1\nprofile.save()\nmember = self.context['member']\nmember.trips_taken += 1\nmember.save()\npool = self.context['pool']\npool.trips_taken += 1\npool.save()\nreturn trip"], "bodies_text": "<|body_start_0|>\n try:\n user = User.objects.get(pk=data)\n except User.DoesNotExist:\n raise serializers.ValidationError('Invalid passenger.')\n pool = self.context['pool']\n try:\n membership = Membership.objects.get(user=user, pool=pool, is_active=True)\n except Membership.DoesNotExist:\n raise serializers.ValidationError('User is not an active member of the poo.')\n self.context['user'] = user\n self.context['member'] = membership\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n trip = self.context['trip']\n if trip.departure_date <= timezone.now():\n raise serializers.ValidationError(\"You can't join this trip now\")\n if trip.available_seats < 1:\n raise serializers.ValidationError('Trip is already full!')\n if trip.passengers.filter(pk=self.context['user'].pk).exists():\n raise serializers.ValidationError('Passenger is already in this trip')\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n trip = self.context['trip']\n user = self.context['user']\n trip.passengers.add(user)\n trip.available_seats -= 1\n profile = user.profile\n profile.trips_taken += 1\n profile.save()\n member = self.context['member']\n member.trips_taken += 1\n member.save()\n pool = self.context['pool']\n pool.trips_taken += 1\n pool.save()\n return trip\n<|end_body_2|>\n", "class_docstring": "Join trip serializer.", "class_name": "JoinTripSerializer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JoinTripSerializer:\n \"\"\"Join trip serializer.\"\"\"\n\n def validate_passenger(self, data):\n \"\"\"Verify passenger exists and is a pool member.\"\"\"\n <|body_0|>\n\n def validate(self, data):\n \"\"\"Verify trips allow new passengers.\"\"\"\n <|body_1|>\n\n def update(self, instance, data):\n \"\"\"Add passenger to trip, and update stats.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n user = User.objects.get(pk=data)\n except User.DoesNotExist:\n raise serializers.ValidationError('Invalid passenger.')\n pool = self.context['pool']\n try:\n membership = Membership.objects.get(user=user, pool=pool, is_active=True)\n except Membership.DoesNotExist:\n raise serializers.ValidationError('User is not an active member of the poo.')\n self.context['user'] = user\n self.context['member'] = membership\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n trip = self.context['trip']\n if trip.departure_date <= timezone.now():\n raise serializers.ValidationError(\"You can't join this trip now\")\n if trip.available_seats < 1:\n raise serializers.ValidationError('Trip is already full!')\n if trip.passengers.filter(pk=self.context['user'].pk).exists():\n raise serializers.ValidationError('Passenger is already in this trip')\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n trip = self.context['trip']\n user = self.context['user']\n trip.passengers.add(user)\n trip.available_seats -= 1\n profile = user.profile\n profile.trips_taken += 1\n profile.save()\n member = self.context['member']\n member.trips_taken += 1\n member.save()\n pool = self.context['pool']\n pool.trips_taken += 1\n pool.save()\n return trip\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000213", "length_bytes": 6117, "license_type": "no_license", "methods": [{"docstring": "Verify passenger exists and is a pool member.", "name": "validate_passenger", "signature": "def validate_passenger(self, data)"}, {"docstring": "Verify trips allow new passengers.", "name": "validate", "signature": "def validate(self, data)"}, {"docstring": "Add passenger to trip, and update stats.", "name": "update", "signature": "def update(self, instance, data)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_041467", "prompt": "Implement the Python class `JoinTripSerializer` described below.\n\nClass description:\nJoin trip serializer.\n\nMethod signatures and docstrings:\n- def validate_passenger(self, data): Verify passenger exists and is a pool member.\n- def validate(self, data): Verify trips allow new passengers.\n- def update(self, instance, data): Add passenger to trip, and update stats.", "prompted_full_text": "Implement the Python class `JoinTripSerializer` described below.\n\nClass description:\nJoin trip serializer.\n\nMethod signatures and docstrings:\n- def validate_passenger(self, data): Verify passenger exists and is a pool member.\n- def validate(self, data): Verify trips allow new passengers.\n- def update(self, instance, data): Add passenger to trip, and update stats.\n\n<|skeleton|>\nclass JoinTripSerializer:\n \"\"\"Join trip serializer.\"\"\"\n\n def validate_passenger(self, data):\n \"\"\"Verify passenger exists and is a pool member.\"\"\"\n <|body_0|>\n\n def validate(self, data):\n \"\"\"Verify trips allow new passengers.\"\"\"\n <|body_1|>\n\n def update(self, instance, data):\n \"\"\"Add passenger to trip, and update stats.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n user = User.objects.get(pk=data)\n except User.DoesNotExist:\n raise serializers.ValidationError('Invalid passenger.')\n pool = self.context['pool']\n try:\n membership = Membership.objects.get(user=user, pool=pool, is_active=True)\n except Membership.DoesNotExist:\n raise serializers.ValidationError('User is not an active member of the poo.')\n self.context['user'] = user\n self.context['member'] = membership\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n trip = self.context['trip']\n if trip.departure_date <= timezone.now():\n raise serializers.ValidationError(\"You can't join this trip now\")\n if trip.available_seats < 1:\n raise serializers.ValidationError('Trip is already full!')\n if trip.passengers.filter(pk=self.context['user'].pk).exists():\n raise serializers.ValidationError('Passenger is already in this trip')\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n trip = self.context['trip']\n user = self.context['user']\n trip.passengers.add(user)\n trip.available_seats -= 1\n profile = user.profile\n profile.trips_taken += 1\n profile.save()\n member = self.context['member']\n member.trips_taken += 1\n member.save()\n pool = self.context['pool']\n pool.trips_taken += 1\n pool.save()\n return trip\n<|end_body_2|>\n", "revision_id": "ee003cd4cecbcb3ec1a490a3259e8914f78b11cd", "skeleton": "<|skeleton|>\nclass JoinTripSerializer:\n \"\"\"Join trip serializer.\"\"\"\n\n def validate_passenger(self, data):\n \"\"\"Verify passenger exists and is a pool member.\"\"\"\n <|body_0|>\n\n def validate(self, data):\n \"\"\"Verify trips allow new passengers.\"\"\"\n <|body_1|>\n\n def update(self, instance, data):\n \"\"\"Add passenger to trip, and update stats.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class JoinTripSerializer:\n \"\"\"Join trip serializer.\"\"\"\n\n def validate_passenger(self, data):\n \"\"\"Verify passenger exists and is a pool member.\"\"\"\n try:\n user = User.objects.get(pk=data)\n except User.DoesNotExist:\n raise serializers.ValidationError('Invalid passenger.')\n pool = self.context['pool']\n try:\n membership = Membership.objects.get(user=user, pool=pool, is_active=True)\n except Membership.DoesNotExist:\n raise serializers.ValidationError('User is not an active member of the poo.')\n self.context['user'] = user\n self.context['member'] = membership\n return data\n\n def validate(self, data):\n \"\"\"Verify trips allow new passengers.\"\"\"\n trip = self.context['trip']\n if trip.departure_date <= timezone.now():\n raise serializers.ValidationError(\"You can't join this trip now\")\n if trip.available_seats < 1:\n raise serializers.ValidationError('Trip is already full!')\n if trip.passengers.filter(pk=self.context['user'].pk).exists():\n raise serializers.ValidationError('Passenger is already in this trip')\n return data\n\n def update(self, instance, data):\n \"\"\"Add passenger to trip, and update stats.\"\"\"\n trip = self.context['trip']\n user = self.context['user']\n trip.passengers.add(user)\n trip.available_seats -= 1\n profile = user.profile\n profile.trips_taken += 1\n profile.save()\n member = self.context['member']\n member.trips_taken += 1\n member.save()\n pool = self.context['pool']\n pool.trips_taken += 1\n pool.save()\n return trip\n", "source": "the_stack_v2_python_sparse", "source_path": "grupalcar/trips/serializers/trips.py", "source_repo": "adnrbp/GrupalCar-API", "split": "test", "star_events_count": 1} {"blob_id": "89bef6437cb9da11e158ceee629bab5573d7dd56", "bodies": ["super(DeepGP, self).__init__()\nself.linear1 = torch.nn.Linear(1, 30)\nself.tanh1 = torch.nn.Tanh()\nself.linear2 = torch.nn.Linear(30, 30)\nself.tanh2 = torch.nn.Tanh()\nself.linear3 = torch.nn.Linear(30, 6)\nself.tanh3 = torch.nn.Tanh()\nself.linear4 = torch.nn.Linear(6, 1)\nself.tanh4 = torch.nn.Sigmoid()\nself.scale = 10.0\nself.scale = torch.nn.Parameter(torch.Tensor([1.0]))\nself.gp = gprh.GP_1D(sigma_f=1.0, lengthscale=1, sigma_n=2 * noise_std)", "h = x_train.clone()\nh = self.linear1(h)\nh = self.tanh1(h)\nh = self.linear2(h)\nh = self.tanh2(h)\nh = self.linear3(h)\nh = self.tanh3(h)\nh = self.linear4(h)\nh = self.tanh4(h)\nh = self.scale * h\nif x_test is not None:\n h2 = x_test.clone()\n h2 = self.linear1(h2)\n h2 = self.tanh1(h2)\n h2 = self.linear2(h2)\n h2 = self.tanh2(h2)\n h2 = self.linear3(h2)\n h2 = self.tanh3(h2)\n h2 = self.linear4(h2)\n h2 = self.tanh4(h2)\n h2 = self.scale * h2\nelse:\n h2 = None\nif y_train is not None:\n out = self.gp(h, y_train, m, h2)\nelse:\n out = h\nreturn out"], "bodies_text": "<|body_start_0|>\n super(DeepGP, self).__init__()\n self.linear1 = torch.nn.Linear(1, 30)\n self.tanh1 = torch.nn.Tanh()\n self.linear2 = torch.nn.Linear(30, 30)\n self.tanh2 = torch.nn.Tanh()\n self.linear3 = torch.nn.Linear(30, 6)\n self.tanh3 = torch.nn.Tanh()\n self.linear4 = torch.nn.Linear(6, 1)\n self.tanh4 = torch.nn.Sigmoid()\n self.scale = 10.0\n self.scale = torch.nn.Parameter(torch.Tensor([1.0]))\n self.gp = gprh.GP_1D(sigma_f=1.0, lengthscale=1, sigma_n=2 * noise_std)\n<|end_body_0|>\n\n<|body_start_1|>\n h = x_train.clone()\n h = self.linear1(h)\n h = self.tanh1(h)\n h = self.linear2(h)\n h = self.tanh2(h)\n h = self.linear3(h)\n h = self.tanh3(h)\n h = self.linear4(h)\n h = self.tanh4(h)\n h = self.scale * h\n if x_test is not None:\n h2 = x_test.clone()\n h2 = self.linear1(h2)\n h2 = self.tanh1(h2)\n h2 = self.linear2(h2)\n h2 = self.tanh2(h2)\n h2 = self.linear3(h2)\n h2 = self.tanh3(h2)\n h2 = self.linear4(h2)\n h2 = self.tanh4(h2)\n h2 = self.scale * h2\n else:\n h2 = None\n if y_train is not None:\n out = self.gp(h, y_train, m, h2)\n else:\n out = h\n return out\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DeepGP", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeepGP:\n\n def __init__(self):\n \"\"\"In the constructor we instantiate two nn.Linear modules and assign them as member variables.\"\"\"\n <|body_0|>\n\n def forward(self, x_train, y_train=None, m=None, x_test=None):\n \"\"\"In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary operators on Tensors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DeepGP, self).__init__()\n self.linear1 = torch.nn.Linear(1, 30)\n self.tanh1 = torch.nn.Tanh()\n self.linear2 = torch.nn.Linear(30, 30)\n self.tanh2 = torch.nn.Tanh()\n self.linear3 = torch.nn.Linear(30, 6)\n self.tanh3 = torch.nn.Tanh()\n self.linear4 = torch.nn.Linear(6, 1)\n self.tanh4 = torch.nn.Sigmoid()\n self.scale = 10.0\n self.scale = torch.nn.Parameter(torch.Tensor([1.0]))\n self.gp = gprh.GP_1D(sigma_f=1.0, lengthscale=1, sigma_n=2 * noise_std)\n<|end_body_0|>\n\n<|body_start_1|>\n h = x_train.clone()\n h = self.linear1(h)\n h = self.tanh1(h)\n h = self.linear2(h)\n h = self.tanh2(h)\n h = self.linear3(h)\n h = self.tanh3(h)\n h = self.linear4(h)\n h = self.tanh4(h)\n h = self.scale * h\n if x_test is not None:\n h2 = x_test.clone()\n h2 = self.linear1(h2)\n h2 = self.tanh1(h2)\n h2 = self.linear2(h2)\n h2 = self.tanh2(h2)\n h2 = self.linear3(h2)\n h2 = self.tanh3(h2)\n h2 = self.linear4(h2)\n h2 = self.tanh4(h2)\n h2 = self.scale * h2\n else:\n h2 = None\n if y_train is not None:\n out = self.gp(h, y_train, m, h2)\n else:\n out = h\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000214", "length_bytes": 4379, "license_type": "no_license", "methods": [{"docstring": "In the constructor we instantiate two nn.Linear modules and assign them as member variables.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary operators on Tensors.", "name": "forward", "signature": "def forward(self, x_train, y_train=None, m=None, x_test=None)"}], "n_methods": 2, "prompt": "Implement the Python class `DeepGP` described below.\n\nClass description:\nImplement the DeepGP class.\n\nMethod signatures and docstrings:\n- def __init__(self): In the constructor we instantiate two nn.Linear modules and assign them as member variables.\n- def forward(self, x_train, y_train=None, m=None, x_test=None): In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary operators on Tensors.", "prompted_full_text": "Implement the Python class `DeepGP` described below.\n\nClass description:\nImplement the DeepGP class.\n\nMethod signatures and docstrings:\n- def __init__(self): In the constructor we instantiate two nn.Linear modules and assign them as member variables.\n- def forward(self, x_train, y_train=None, m=None, x_test=None): In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary operators on Tensors.\n\n<|skeleton|>\nclass DeepGP:\n\n def __init__(self):\n \"\"\"In the constructor we instantiate two nn.Linear modules and assign them as member variables.\"\"\"\n <|body_0|>\n\n def forward(self, x_train, y_train=None, m=None, x_test=None):\n \"\"\"In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary operators on Tensors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DeepGP, self).__init__()\n self.linear1 = torch.nn.Linear(1, 30)\n self.tanh1 = torch.nn.Tanh()\n self.linear2 = torch.nn.Linear(30, 30)\n self.tanh2 = torch.nn.Tanh()\n self.linear3 = torch.nn.Linear(30, 6)\n self.tanh3 = torch.nn.Tanh()\n self.linear4 = torch.nn.Linear(6, 1)\n self.tanh4 = torch.nn.Sigmoid()\n self.scale = 10.0\n self.scale = torch.nn.Parameter(torch.Tensor([1.0]))\n self.gp = gprh.GP_1D(sigma_f=1.0, lengthscale=1, sigma_n=2 * noise_std)\n<|end_body_0|>\n\n<|body_start_1|>\n h = x_train.clone()\n h = self.linear1(h)\n h = self.tanh1(h)\n h = self.linear2(h)\n h = self.tanh2(h)\n h = self.linear3(h)\n h = self.tanh3(h)\n h = self.linear4(h)\n h = self.tanh4(h)\n h = self.scale * h\n if x_test is not None:\n h2 = x_test.clone()\n h2 = self.linear1(h2)\n h2 = self.tanh1(h2)\n h2 = self.linear2(h2)\n h2 = self.tanh2(h2)\n h2 = self.linear3(h2)\n h2 = self.tanh3(h2)\n h2 = self.linear4(h2)\n h2 = self.tanh4(h2)\n h2 = self.scale * h2\n else:\n h2 = None\n if y_train is not None:\n out = self.gp(h, y_train, m, h2)\n else:\n out = h\n return out\n<|end_body_1|>\n", "revision_id": "8a1d6792faec1292bd13e148d378c0eb7db9247a", "skeleton": "<|skeleton|>\nclass DeepGP:\n\n def __init__(self):\n \"\"\"In the constructor we instantiate two nn.Linear modules and assign them as member variables.\"\"\"\n <|body_0|>\n\n def forward(self, x_train, y_train=None, m=None, x_test=None):\n \"\"\"In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary operators on Tensors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DeepGP:\n def __init__(self):\n \"\"\"In the constructor we instantiate two nn.Linear modules and assign them as member variables.\"\"\"\n super(DeepGP, self).__init__()\n self.linear1 = torch.nn.Linear(1, 30)\n self.tanh1 = torch.nn.Tanh()\n self.linear2 = torch.nn.Linear(30, 30)\n self.tanh2 = torch.nn.Tanh()\n self.linear3 = torch.nn.Linear(30, 6)\n self.tanh3 = torch.nn.Tanh()\n self.linear4 = torch.nn.Linear(6, 1)\n self.tanh4 = torch.nn.Sigmoid()\n self.scale = 10.0\n self.scale = torch.nn.Parameter(torch.Tensor([1.0]))\n self.gp = gprh.GP_1D(sigma_f=1.0, lengthscale=1, sigma_n=2 * noise_std)\n\n def forward(self, x_train, y_train=None, m=None, x_test=None):\n \"\"\"In the forward function we accept a Tensor of input data and we must return a Tensor of output data. We can use Modules defined in the constructor as well as arbitrary operators on Tensors.\"\"\"\n h = x_train.clone()\n h = self.linear1(h)\n h = self.tanh1(h)\n h = self.linear2(h)\n h = self.tanh2(h)\n h = self.linear3(h)\n h = self.tanh3(h)\n h = self.linear4(h)\n h = self.tanh4(h)\n h = self.scale * h\n if x_test is not None:\n h2 = x_test.clone()\n h2 = self.linear1(h2)\n h2 = self.tanh1(h2)\n h2 = self.linear2(h2)\n h2 = self.tanh2(h2)\n h2 = self.linear3(h2)\n h2 = self.tanh3(h2)\n h2 = self.linear4(h2)\n h2 = self.tanh4(h2)\n h2 = self.scale * h2\n else:\n h2 = None\n if y_train is not None:\n out = self.gp(h, y_train, m, h2)\n else:\n out = h\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "example_1D_hilbert_NN.py", "source_repo": "jnh277/deepGPforCT", "split": "test", "star_events_count": 0} {"blob_id": "906a237e3a12bf15b439bf74ead13246258528c7", "bodies": ["df = df[[x_label, y_label]].dropna()\nx = np.array(df[x_label])\ny = np.array(df[y_label])\nxdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x, y=y)\nx50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\nreturn (x50, y50)", "if xypos_ic50_f is None:\n xypos_ic50_f = [1, 1.4, 0.5, 0.5]\nxpos_ic50, ypos_ic50, xpos_f, ypos_f = xypos_ic50_f\ndf = df[[x_label, y_label]].dropna()\nx_array = np.array(df[x_label])\ny_array = np.array(df[y_label])\nxdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x_array, y=y_array)\nx50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\nif legend_label is None:\n legend_label = 'data'\nax = sns.lineplot(ax=ax, data=df, x=x_label, y=y_array, color=color, legend=True, linewidth=3, linestyle='-', label=legend_label, err_style='bars')\nif fitted:\n ax = sns.lineplot(ax=ax, x=xdata, y=ydata, linestyle='--', linewidth=3, color='black', label='fitted')\n if log_scale:\n ax.set(xscale='log')\n arrow_properties = dict(facecolor='black', width=0.5, headwidth=4, shrink=0.1)\n plt.annotate(f'IC50 = {round(x50, 1)} {ic50_unit}', (x50, y50), (x50 * xpos_ic50, y50 * ypos_ic50), horizontalalignment='center', arrowprops=arrow_properties, fontsize=size_info)\n plt.legend(fontsize=size_legend)\n plt.text(x50 * xpos_f, y50 * ypos_f, str_formula, fontsize=size_info)\nelse:\n ax = plt.gca()\n ax.get_legend().remove()\nplt.ylabel(y_label, size=size_label, weight='bold')\nplt.xlabel(x_label, size=size_label, weight='bold')\nsns.despine()\nreturn ax"], "bodies_text": "<|body_start_0|>\n df = df[[x_label, y_label]].dropna()\n x = np.array(df[x_label])\n y = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x, y=y)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n return (x50, y50)\n<|end_body_0|>\n\n<|body_start_1|>\n if xypos_ic50_f is None:\n xypos_ic50_f = [1, 1.4, 0.5, 0.5]\n xpos_ic50, ypos_ic50, xpos_f, ypos_f = xypos_ic50_f\n df = df[[x_label, y_label]].dropna()\n x_array = np.array(df[x_label])\n y_array = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x_array, y=y_array)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n if legend_label is None:\n legend_label = 'data'\n ax = sns.lineplot(ax=ax, data=df, x=x_label, y=y_array, color=color, legend=True, linewidth=3, linestyle='-', label=legend_label, err_style='bars')\n if fitted:\n ax = sns.lineplot(ax=ax, x=xdata, y=ydata, linestyle='--', linewidth=3, color='black', label='fitted')\n if log_scale:\n ax.set(xscale='log')\n arrow_properties = dict(facecolor='black', width=0.5, headwidth=4, shrink=0.1)\n plt.annotate(f'IC50 = {round(x50, 1)} {ic50_unit}', (x50, y50), (x50 * xpos_ic50, y50 * ypos_ic50), horizontalalignment='center', arrowprops=arrow_properties, fontsize=size_info)\n plt.legend(fontsize=size_legend)\n plt.text(x50 * xpos_f, y50 * ypos_f, str_formula, fontsize=size_info)\n else:\n ax = plt.gca()\n ax.get_legend().remove()\n plt.ylabel(y_label, size=size_label, weight='bold')\n plt.xlabel(x_label, size=size_label, weight='bold')\n sns.despine()\n return ax\n<|end_body_1|>\n", "class_docstring": "Class for potency analysis", "class_name": "PotencyAnalysis", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PotencyAnalysis:\n \"\"\"Class for potency analysis\"\"\"\n\n def get_ic50(df=None, x_label=None, y_label=None):\n \"\"\"Get ic50 for x depending on y\"\"\"\n <|body_0|>\n\n def plot_ic50(ax=None, df=None, x_label=None, y_label=None, legend_label=None, fitted=True, xypos_ic50_f=None, log_scale=True, color=None, ic50_unit='µM', size_label=20, size_legend=16, size_info=18):\n \"\"\"Plot lineplot for concentration with fitted curve used to determine IC50\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n df = df[[x_label, y_label]].dropna()\n x = np.array(df[x_label])\n y = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x, y=y)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n return (x50, y50)\n<|end_body_0|>\n\n<|body_start_1|>\n if xypos_ic50_f is None:\n xypos_ic50_f = [1, 1.4, 0.5, 0.5]\n xpos_ic50, ypos_ic50, xpos_f, ypos_f = xypos_ic50_f\n df = df[[x_label, y_label]].dropna()\n x_array = np.array(df[x_label])\n y_array = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x_array, y=y_array)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n if legend_label is None:\n legend_label = 'data'\n ax = sns.lineplot(ax=ax, data=df, x=x_label, y=y_array, color=color, legend=True, linewidth=3, linestyle='-', label=legend_label, err_style='bars')\n if fitted:\n ax = sns.lineplot(ax=ax, x=xdata, y=ydata, linestyle='--', linewidth=3, color='black', label='fitted')\n if log_scale:\n ax.set(xscale='log')\n arrow_properties = dict(facecolor='black', width=0.5, headwidth=4, shrink=0.1)\n plt.annotate(f'IC50 = {round(x50, 1)} {ic50_unit}', (x50, y50), (x50 * xpos_ic50, y50 * ypos_ic50), horizontalalignment='center', arrowprops=arrow_properties, fontsize=size_info)\n plt.legend(fontsize=size_legend)\n plt.text(x50 * xpos_f, y50 * ypos_f, str_formula, fontsize=size_info)\n else:\n ax = plt.gca()\n ax.get_legend().remove()\n plt.ylabel(y_label, size=size_label, weight='bold')\n plt.xlabel(x_label, size=size_label, weight='bold')\n sns.despine()\n return ax\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000215", "length_bytes": 4285, "license_type": "no_license", "methods": [{"docstring": "Get ic50 for x depending on y", "name": "get_ic50", "signature": "def get_ic50(df=None, x_label=None, y_label=None)"}, {"docstring": "Plot lineplot for concentration with fitted curve used to determine IC50", "name": "plot_ic50", "signature": "def plot_ic50(ax=None, df=None, x_label=None, y_label=None, legend_label=None, fitted=True, xypos_ic50_f=None, log_scale=True, color=None, ic50_unit='µM', size_label=20, size_legend=16, size_info=18)"}], "n_methods": 2, "prompt": "Implement the Python class `PotencyAnalysis` described below.\n\nClass description:\nClass for potency analysis\n\nMethod signatures and docstrings:\n- def get_ic50(df=None, x_label=None, y_label=None): Get ic50 for x depending on y\n- def plot_ic50(ax=None, df=None, x_label=None, y_label=None, legend_label=None, fitted=True, xypos_ic50_f=None, log_scale=True, color=None, ic50_unit='µM', size_label=20, size_legend=16, size_info=18): Plot lineplot for concentration with fitted curve used to determine IC50", "prompted_full_text": "Implement the Python class `PotencyAnalysis` described below.\n\nClass description:\nClass for potency analysis\n\nMethod signatures and docstrings:\n- def get_ic50(df=None, x_label=None, y_label=None): Get ic50 for x depending on y\n- def plot_ic50(ax=None, df=None, x_label=None, y_label=None, legend_label=None, fitted=True, xypos_ic50_f=None, log_scale=True, color=None, ic50_unit='µM', size_label=20, size_legend=16, size_info=18): Plot lineplot for concentration with fitted curve used to determine IC50\n\n<|skeleton|>\nclass PotencyAnalysis:\n \"\"\"Class for potency analysis\"\"\"\n\n def get_ic50(df=None, x_label=None, y_label=None):\n \"\"\"Get ic50 for x depending on y\"\"\"\n <|body_0|>\n\n def plot_ic50(ax=None, df=None, x_label=None, y_label=None, legend_label=None, fitted=True, xypos_ic50_f=None, log_scale=True, color=None, ic50_unit='µM', size_label=20, size_legend=16, size_info=18):\n \"\"\"Plot lineplot for concentration with fitted curve used to determine IC50\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n df = df[[x_label, y_label]].dropna()\n x = np.array(df[x_label])\n y = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x, y=y)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n return (x50, y50)\n<|end_body_0|>\n\n<|body_start_1|>\n if xypos_ic50_f is None:\n xypos_ic50_f = [1, 1.4, 0.5, 0.5]\n xpos_ic50, ypos_ic50, xpos_f, ypos_f = xypos_ic50_f\n df = df[[x_label, y_label]].dropna()\n x_array = np.array(df[x_label])\n y_array = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x_array, y=y_array)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n if legend_label is None:\n legend_label = 'data'\n ax = sns.lineplot(ax=ax, data=df, x=x_label, y=y_array, color=color, legend=True, linewidth=3, linestyle='-', label=legend_label, err_style='bars')\n if fitted:\n ax = sns.lineplot(ax=ax, x=xdata, y=ydata, linestyle='--', linewidth=3, color='black', label='fitted')\n if log_scale:\n ax.set(xscale='log')\n arrow_properties = dict(facecolor='black', width=0.5, headwidth=4, shrink=0.1)\n plt.annotate(f'IC50 = {round(x50, 1)} {ic50_unit}', (x50, y50), (x50 * xpos_ic50, y50 * ypos_ic50), horizontalalignment='center', arrowprops=arrow_properties, fontsize=size_info)\n plt.legend(fontsize=size_legend)\n plt.text(x50 * xpos_f, y50 * ypos_f, str_formula, fontsize=size_info)\n else:\n ax = plt.gca()\n ax.get_legend().remove()\n plt.ylabel(y_label, size=size_label, weight='bold')\n plt.xlabel(x_label, size=size_label, weight='bold')\n sns.despine()\n return ax\n<|end_body_1|>\n", "revision_id": "6a7e44ea11532bb73afe4745df1e2bb42a9e25a6", "skeleton": "<|skeleton|>\nclass PotencyAnalysis:\n \"\"\"Class for potency analysis\"\"\"\n\n def get_ic50(df=None, x_label=None, y_label=None):\n \"\"\"Get ic50 for x depending on y\"\"\"\n <|body_0|>\n\n def plot_ic50(ax=None, df=None, x_label=None, y_label=None, legend_label=None, fitted=True, xypos_ic50_f=None, log_scale=True, color=None, ic50_unit='µM', size_label=20, size_legend=16, size_info=18):\n \"\"\"Plot lineplot for concentration with fitted curve used to determine IC50\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PotencyAnalysis:\n \"\"\"Class for potency analysis\"\"\"\n\n def get_ic50(df=None, x_label=None, y_label=None):\n \"\"\"Get ic50 for x depending on y\"\"\"\n df = df[[x_label, y_label]].dropna()\n x = np.array(df[x_label])\n y = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x, y=y)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n return (x50, y50)\n\n def plot_ic50(ax=None, df=None, x_label=None, y_label=None, legend_label=None, fitted=True, xypos_ic50_f=None, log_scale=True, color=None, ic50_unit='µM', size_label=20, size_legend=16, size_info=18):\n \"\"\"Plot lineplot for concentration with fitted curve used to determine IC50\"\"\"\n if xypos_ic50_f is None:\n xypos_ic50_f = [1, 1.4, 0.5, 0.5]\n xpos_ic50, ypos_ic50, xpos_f, ypos_f = xypos_ic50_f\n df = df[[x_label, y_label]].dropna()\n x_array = np.array(df[x_label])\n y_array = np.array(df[y_label])\n xdata, ydata, str_formula = _get_fitted_curve(f=_exp_reg, x=x_array, y=y_array)\n x50, y50 = _get_x50_y50(xdata=xdata, ydata=ydata)\n if legend_label is None:\n legend_label = 'data'\n ax = sns.lineplot(ax=ax, data=df, x=x_label, y=y_array, color=color, legend=True, linewidth=3, linestyle='-', label=legend_label, err_style='bars')\n if fitted:\n ax = sns.lineplot(ax=ax, x=xdata, y=ydata, linestyle='--', linewidth=3, color='black', label='fitted')\n if log_scale:\n ax.set(xscale='log')\n arrow_properties = dict(facecolor='black', width=0.5, headwidth=4, shrink=0.1)\n plt.annotate(f'IC50 = {round(x50, 1)} {ic50_unit}', (x50, y50), (x50 * xpos_ic50, y50 * ypos_ic50), horizontalalignment='center', arrowprops=arrow_properties, fontsize=size_info)\n plt.legend(fontsize=size_legend)\n plt.text(x50 * xpos_f, y50 * ypos_f, str_formula, fontsize=size_info)\n else:\n ax = plt.gca()\n ax.get_legend().remove()\n plt.ylabel(y_label, size=size_label, weight='bold')\n plt.xlabel(x_label, size=size_label, weight='bold')\n sns.despine()\n return ax\n", "source": "the_stack_v2_python_sparse", "source_path": "proteomics_tools/potency_analysis/potency.py", "source_repo": "stephanbreimann/Tools_Proteomics", "split": "test", "star_events_count": 0} {"blob_id": "a7a892f59b0888cf109fbf945d3284dd6b3e5466", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn ConditionalAccessRoot()", "from .authentication_context_class_reference import AuthenticationContextClassReference\nfrom .authentication_strength_root import AuthenticationStrengthRoot\nfrom .conditional_access_policy import ConditionalAccessPolicy\nfrom .conditional_access_template import ConditionalAccessTemplate\nfrom .entity import Entity\nfrom .named_location import NamedLocation\nfrom .authentication_context_class_reference import AuthenticationContextClassReference\nfrom .authentication_strength_root import AuthenticationStrengthRoot\nfrom .conditional_access_policy import ConditionalAccessPolicy\nfrom .conditional_access_template import ConditionalAccessTemplate\nfrom .entity import Entity\nfrom .named_location import NamedLocation\nfields: Dict[str, Callable[[Any], None]] = {'authenticationContextClassReferences': lambda n: setattr(self, 'authentication_context_class_references', n.get_collection_of_object_values(AuthenticationContextClassReference)), 'authenticationStrength': lambda n: setattr(self, 'authentication_strength', n.get_object_value(AuthenticationStrengthRoot)), 'namedLocations': lambda n: setattr(self, 'named_locations', n.get_collection_of_object_values(NamedLocation)), 'policies': lambda n: setattr(self, 'policies', n.get_collection_of_object_values(ConditionalAccessPolicy)), 'templates': lambda n: setattr(self, 'templates', n.get_collection_of_object_values(ConditionalAccessTemplate))}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_collection_of_object_values('authenticationContextClassReferences', self.authentication_context_class_references)\nwriter.write_object_value('authenticationStrength', self.authentication_strength)\nwriter.write_collection_of_object_values('namedLocations', self.named_locations)\nwriter.write_collection_of_object_values('policies', self.policies)\nwriter.write_collection_of_object_values('templates', self.templates)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ConditionalAccessRoot()\n<|end_body_0|>\n\n<|body_start_1|>\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n fields: Dict[str, Callable[[Any], None]] = {'authenticationContextClassReferences': lambda n: setattr(self, 'authentication_context_class_references', n.get_collection_of_object_values(AuthenticationContextClassReference)), 'authenticationStrength': lambda n: setattr(self, 'authentication_strength', n.get_object_value(AuthenticationStrengthRoot)), 'namedLocations': lambda n: setattr(self, 'named_locations', n.get_collection_of_object_values(NamedLocation)), 'policies': lambda n: setattr(self, 'policies', n.get_collection_of_object_values(ConditionalAccessPolicy)), 'templates': lambda n: setattr(self, 'templates', n.get_collection_of_object_values(ConditionalAccessTemplate))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('authenticationContextClassReferences', self.authentication_context_class_references)\n writer.write_object_value('authenticationStrength', self.authentication_strength)\n writer.write_collection_of_object_values('namedLocations', self.named_locations)\n writer.write_collection_of_object_values('policies', self.policies)\n writer.write_collection_of_object_values('templates', self.templates)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ConditionalAccessRoot", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConditionalAccessRoot:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ConditionalAccessRoot:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ConditionalAccessRoot\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ConditionalAccessRoot()\n<|end_body_0|>\n\n<|body_start_1|>\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n fields: Dict[str, Callable[[Any], None]] = {'authenticationContextClassReferences': lambda n: setattr(self, 'authentication_context_class_references', n.get_collection_of_object_values(AuthenticationContextClassReference)), 'authenticationStrength': lambda n: setattr(self, 'authentication_strength', n.get_object_value(AuthenticationStrengthRoot)), 'namedLocations': lambda n: setattr(self, 'named_locations', n.get_collection_of_object_values(NamedLocation)), 'policies': lambda n: setattr(self, 'policies', n.get_collection_of_object_values(ConditionalAccessPolicy)), 'templates': lambda n: setattr(self, 'templates', n.get_collection_of_object_values(ConditionalAccessTemplate))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('authenticationContextClassReferences', self.authentication_context_class_references)\n writer.write_object_value('authenticationStrength', self.authentication_strength)\n writer.write_collection_of_object_values('namedLocations', self.named_locations)\n writer.write_collection_of_object_values('policies', self.policies)\n writer.write_collection_of_object_values('templates', self.templates)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000216", "length_bytes": 4813, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ConditionalAccessRoot", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ConditionalAccessRoot"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000184", "prompt": "Implement the Python class `ConditionalAccessRoot` described below.\n\nClass description:\nImplement the ConditionalAccessRoot class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ConditionalAccessRoot: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ConditionalAccessRoot\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `ConditionalAccessRoot` described below.\n\nClass description:\nImplement the ConditionalAccessRoot class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ConditionalAccessRoot: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ConditionalAccessRoot\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass ConditionalAccessRoot:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ConditionalAccessRoot:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ConditionalAccessRoot\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ConditionalAccessRoot()\n<|end_body_0|>\n\n<|body_start_1|>\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n fields: Dict[str, Callable[[Any], None]] = {'authenticationContextClassReferences': lambda n: setattr(self, 'authentication_context_class_references', n.get_collection_of_object_values(AuthenticationContextClassReference)), 'authenticationStrength': lambda n: setattr(self, 'authentication_strength', n.get_object_value(AuthenticationStrengthRoot)), 'namedLocations': lambda n: setattr(self, 'named_locations', n.get_collection_of_object_values(NamedLocation)), 'policies': lambda n: setattr(self, 'policies', n.get_collection_of_object_values(ConditionalAccessPolicy)), 'templates': lambda n: setattr(self, 'templates', n.get_collection_of_object_values(ConditionalAccessTemplate))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('authenticationContextClassReferences', self.authentication_context_class_references)\n writer.write_object_value('authenticationStrength', self.authentication_strength)\n writer.write_collection_of_object_values('namedLocations', self.named_locations)\n writer.write_collection_of_object_values('policies', self.policies)\n writer.write_collection_of_object_values('templates', self.templates)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass ConditionalAccessRoot:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ConditionalAccessRoot:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ConditionalAccessRoot\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ConditionalAccessRoot:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> ConditionalAccessRoot:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: ConditionalAccessRoot\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return ConditionalAccessRoot()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n from .authentication_context_class_reference import AuthenticationContextClassReference\n from .authentication_strength_root import AuthenticationStrengthRoot\n from .conditional_access_policy import ConditionalAccessPolicy\n from .conditional_access_template import ConditionalAccessTemplate\n from .entity import Entity\n from .named_location import NamedLocation\n fields: Dict[str, Callable[[Any], None]] = {'authenticationContextClassReferences': lambda n: setattr(self, 'authentication_context_class_references', n.get_collection_of_object_values(AuthenticationContextClassReference)), 'authenticationStrength': lambda n: setattr(self, 'authentication_strength', n.get_object_value(AuthenticationStrengthRoot)), 'namedLocations': lambda n: setattr(self, 'named_locations', n.get_collection_of_object_values(NamedLocation)), 'policies': lambda n: setattr(self, 'policies', n.get_collection_of_object_values(ConditionalAccessPolicy)), 'templates': lambda n: setattr(self, 'templates', n.get_collection_of_object_values(ConditionalAccessTemplate))}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_collection_of_object_values('authenticationContextClassReferences', self.authentication_context_class_references)\n writer.write_object_value('authenticationStrength', self.authentication_strength)\n writer.write_collection_of_object_values('namedLocations', self.named_locations)\n writer.write_collection_of_object_values('policies', self.policies)\n writer.write_collection_of_object_values('templates', self.templates)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/conditional_access_root.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "65ea03057d6166684db19be128bde18d69443c80", "bodies": ["user_uuid = get_jwt_identity()\ntry:\n page = int(request.args.get('page'))\nexcept (ValueError, TypeError):\n page = 1\nreturn SerieService.get_popular_series(page, user_uuid)", "user_uuid = get_jwt_identity()\ndata = request.get_json()\nreturn SerieService.add_additional_serie(user_uuid, data)"], "bodies_text": "<|body_start_0|>\n user_uuid = get_jwt_identity()\n try:\n page = int(request.args.get('page'))\n except (ValueError, TypeError):\n page = 1\n return SerieService.get_popular_series(page, user_uuid)\n<|end_body_0|>\n\n<|body_start_1|>\n user_uuid = get_jwt_identity()\n data = request.get_json()\n return SerieService.add_additional_serie(user_uuid, data)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SerieResource", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SerieResource:\n\n def get(self):\n \"\"\"Get list of the most popular Series\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Add additional Serie for validation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_uuid = get_jwt_identity()\n try:\n page = int(request.args.get('page'))\n except (ValueError, TypeError):\n page = 1\n return SerieService.get_popular_series(page, user_uuid)\n<|end_body_0|>\n\n<|body_start_1|>\n user_uuid = get_jwt_identity()\n data = request.get_json()\n return SerieService.add_additional_serie(user_uuid, data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000217", "length_bytes": 8589, "license_type": "no_license", "methods": [{"docstring": "Get list of the most popular Series", "name": "get", "signature": "def get(self)"}, {"docstring": "Add additional Serie for validation", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_010745", "prompt": "Implement the Python class `SerieResource` described below.\n\nClass description:\nImplement the SerieResource class.\n\nMethod signatures and docstrings:\n- def get(self): Get list of the most popular Series\n- def post(self): Add additional Serie for validation", "prompted_full_text": "Implement the Python class `SerieResource` described below.\n\nClass description:\nImplement the SerieResource class.\n\nMethod signatures and docstrings:\n- def get(self): Get list of the most popular Series\n- def post(self): Add additional Serie for validation\n\n<|skeleton|>\nclass SerieResource:\n\n def get(self):\n \"\"\"Get list of the most popular Series\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Add additional Serie for validation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user_uuid = get_jwt_identity()\n try:\n page = int(request.args.get('page'))\n except (ValueError, TypeError):\n page = 1\n return SerieService.get_popular_series(page, user_uuid)\n<|end_body_0|>\n\n<|body_start_1|>\n user_uuid = get_jwt_identity()\n data = request.get_json()\n return SerieService.add_additional_serie(user_uuid, data)\n<|end_body_1|>\n", "revision_id": "2e7b4e07f149ede884cfe37130d9842ff9bb7be2", "skeleton": "<|skeleton|>\nclass SerieResource:\n\n def get(self):\n \"\"\"Get list of the most popular Series\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Add additional Serie for validation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SerieResource:\n def get(self):\n \"\"\"Get list of the most popular Series\"\"\"\n user_uuid = get_jwt_identity()\n try:\n page = int(request.args.get('page'))\n except (ValueError, TypeError):\n page = 1\n return SerieService.get_popular_series(page, user_uuid)\n\n def post(self):\n \"\"\"Add additional Serie for validation\"\"\"\n user_uuid = get_jwt_identity()\n data = request.get_json()\n return SerieService.add_additional_serie(user_uuid, data)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/resources/serie_resource.py", "source_repo": "RomainCtl/RecoFinement-api", "split": "test", "star_events_count": 0} {"blob_id": "365a2f472d9aca3aa563288bafbac59cafe06ad5", "bodies": ["cache_blog_recent_post = cache.get('pythonizame_recent_playlist')\nif cache_blog_recent_post:\n queryset = cache_blog_recent_post\nelse:\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')[:5]\n cache.set('pythonizame_recent_playlist', queryset, 60 * 5)\nreturn queryset", "if 'q' in request.GET and request.GET['q']:\n search_words = request.GET['q']\n queryset = video_search(search_words)\nelse:\n search_words = ''\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')\npaginator = Paginator(queryset, 18)\nif 'page' in request.GET and request.GET['page']:\n my_page = int(request.GET['page'])\nelse:\n my_page = 1\ntry:\n object_list = paginator.page(my_page)\nexcept PageNotAnInteger:\n object_list = paginator.page(1)\nexcept EmptyPage:\n object_list = paginator.page(paginator.num_pages)\ncategories = VideoCategory.objects.all()\nctx = {'object_list': object_list, 'categories': categories, 'page_obj': object_list, 'recent_playlist': self.get_recent_playlist(), 'queryset': search_words}\nreturn render(request, self.template_name, ctx)"], "bodies_text": "<|body_start_0|>\n cache_blog_recent_post = cache.get('pythonizame_recent_playlist')\n if cache_blog_recent_post:\n queryset = cache_blog_recent_post\n else:\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')[:5]\n cache.set('pythonizame_recent_playlist', queryset, 60 * 5)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n if 'q' in request.GET and request.GET['q']:\n search_words = request.GET['q']\n queryset = video_search(search_words)\n else:\n search_words = ''\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')\n paginator = Paginator(queryset, 18)\n if 'page' in request.GET and request.GET['page']:\n my_page = int(request.GET['page'])\n else:\n my_page = 1\n try:\n object_list = paginator.page(my_page)\n except PageNotAnInteger:\n object_list = paginator.page(1)\n except EmptyPage:\n object_list = paginator.page(paginator.num_pages)\n categories = VideoCategory.objects.all()\n ctx = {'object_list': object_list, 'categories': categories, 'page_obj': object_list, 'recent_playlist': self.get_recent_playlist(), 'queryset': search_words}\n return render(request, self.template_name, ctx)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "IndexView", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IndexView:\n\n def get_recent_playlist():\n \"\"\"Obtenemos las últimas 5 publicaciones :return:\"\"\"\n <|body_0|>\n\n def get(self, request):\n \"\"\"Devolvemos consulta de publicaciones al usuario\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache_blog_recent_post = cache.get('pythonizame_recent_playlist')\n if cache_blog_recent_post:\n queryset = cache_blog_recent_post\n else:\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')[:5]\n cache.set('pythonizame_recent_playlist', queryset, 60 * 5)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n if 'q' in request.GET and request.GET['q']:\n search_words = request.GET['q']\n queryset = video_search(search_words)\n else:\n search_words = ''\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')\n paginator = Paginator(queryset, 18)\n if 'page' in request.GET and request.GET['page']:\n my_page = int(request.GET['page'])\n else:\n my_page = 1\n try:\n object_list = paginator.page(my_page)\n except PageNotAnInteger:\n object_list = paginator.page(1)\n except EmptyPage:\n object_list = paginator.page(paginator.num_pages)\n categories = VideoCategory.objects.all()\n ctx = {'object_list': object_list, 'categories': categories, 'page_obj': object_list, 'recent_playlist': self.get_recent_playlist(), 'queryset': search_words}\n return render(request, self.template_name, ctx)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000218", "length_bytes": 5054, "license_type": "permissive", "methods": [{"docstring": "Obtenemos las últimas 5 publicaciones :return:", "name": "get_recent_playlist", "signature": "def get_recent_playlist()"}, {"docstring": "Devolvemos consulta de publicaciones al usuario", "name": "get", "signature": "def get(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005209", "prompt": "Implement the Python class `IndexView` described below.\n\nClass description:\nImplement the IndexView class.\n\nMethod signatures and docstrings:\n- def get_recent_playlist(): Obtenemos las últimas 5 publicaciones :return:\n- def get(self, request): Devolvemos consulta de publicaciones al usuario", "prompted_full_text": "Implement the Python class `IndexView` described below.\n\nClass description:\nImplement the IndexView class.\n\nMethod signatures and docstrings:\n- def get_recent_playlist(): Obtenemos las últimas 5 publicaciones :return:\n- def get(self, request): Devolvemos consulta de publicaciones al usuario\n\n<|skeleton|>\nclass IndexView:\n\n def get_recent_playlist():\n \"\"\"Obtenemos las últimas 5 publicaciones :return:\"\"\"\n <|body_0|>\n\n def get(self, request):\n \"\"\"Devolvemos consulta de publicaciones al usuario\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cache_blog_recent_post = cache.get('pythonizame_recent_playlist')\n if cache_blog_recent_post:\n queryset = cache_blog_recent_post\n else:\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')[:5]\n cache.set('pythonizame_recent_playlist', queryset, 60 * 5)\n return queryset\n<|end_body_0|>\n\n<|body_start_1|>\n if 'q' in request.GET and request.GET['q']:\n search_words = request.GET['q']\n queryset = video_search(search_words)\n else:\n search_words = ''\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')\n paginator = Paginator(queryset, 18)\n if 'page' in request.GET and request.GET['page']:\n my_page = int(request.GET['page'])\n else:\n my_page = 1\n try:\n object_list = paginator.page(my_page)\n except PageNotAnInteger:\n object_list = paginator.page(1)\n except EmptyPage:\n object_list = paginator.page(paginator.num_pages)\n categories = VideoCategory.objects.all()\n ctx = {'object_list': object_list, 'categories': categories, 'page_obj': object_list, 'recent_playlist': self.get_recent_playlist(), 'queryset': search_words}\n return render(request, self.template_name, ctx)\n<|end_body_1|>\n", "revision_id": "6483aa90859fc1bfbc01540efe4ffcf6fe87e450", "skeleton": "<|skeleton|>\nclass IndexView:\n\n def get_recent_playlist():\n \"\"\"Obtenemos las últimas 5 publicaciones :return:\"\"\"\n <|body_0|>\n\n def get(self, request):\n \"\"\"Devolvemos consulta de publicaciones al usuario\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IndexView:\n def get_recent_playlist():\n \"\"\"Obtenemos las últimas 5 publicaciones :return:\"\"\"\n cache_blog_recent_post = cache.get('pythonizame_recent_playlist')\n if cache_blog_recent_post:\n queryset = cache_blog_recent_post\n else:\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')[:5]\n cache.set('pythonizame_recent_playlist', queryset, 60 * 5)\n return queryset\n\n def get(self, request):\n \"\"\"Devolvemos consulta de publicaciones al usuario\"\"\"\n if 'q' in request.GET and request.GET['q']:\n search_words = request.GET['q']\n queryset = video_search(search_words)\n else:\n search_words = ''\n queryset = PlayList.objects.filter(status=1).order_by('-timestamp')\n paginator = Paginator(queryset, 18)\n if 'page' in request.GET and request.GET['page']:\n my_page = int(request.GET['page'])\n else:\n my_page = 1\n try:\n object_list = paginator.page(my_page)\n except PageNotAnInteger:\n object_list = paginator.page(1)\n except EmptyPage:\n object_list = paginator.page(paginator.num_pages)\n categories = VideoCategory.objects.all()\n ctx = {'object_list': object_list, 'categories': categories, 'page_obj': object_list, 'recent_playlist': self.get_recent_playlist(), 'queryset': search_words}\n return render(request, self.template_name, ctx)\n", "source": "the_stack_v2_python_sparse", "source_path": "pythonizame/apps/videos/views.py", "source_repo": "fabianfalon/pythonizame", "split": "test", "star_events_count": 0} {"blob_id": "a8567538d94a29cb7f591ae9c0302a1304af9bd3", "bodies": ["n = len(nums)\nif n == 0:\n return 0\nif n == 1:\n return nums[0]\nvisited = set()\nmax_res = 0\nres = 0\nfor i in range(n):\n for j in range(i, n):\n if nums[j] not in visited:\n max_res += nums[j]\n visited.add(nums[j])\n else:\n res = max(max_res, res)\n max_res = 0\n visited.clear()\n break\nreturn res", "n = len(nums)\nif n == 0:\n return 0\nif n == 1:\n return nums[0]\ncounters = [0] * 10001\ncur = 0\nres = 0\nl, r = (0, 0)\nwhile r < n:\n cur += nums[r]\n counters[nums[r]] += 1\n while l < r and counters[nums[r]] > 1:\n cur -= nums[l]\n counters[nums[l]] -= 1\n l += 1\n res = max(res, cur)\n r += 1\nreturn res"], "bodies_text": "<|body_start_0|>\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n visited = set()\n max_res = 0\n res = 0\n for i in range(n):\n for j in range(i, n):\n if nums[j] not in visited:\n max_res += nums[j]\n visited.add(nums[j])\n else:\n res = max(max_res, res)\n max_res = 0\n visited.clear()\n break\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n counters = [0] * 10001\n cur = 0\n res = 0\n l, r = (0, 0)\n while r < n:\n cur += nums[r]\n counters[nums[r]] += 1\n while l < r and counters[nums[r]] > 1:\n cur -= nums[l]\n counters[nums[l]] -= 1\n l += 1\n res = max(res, cur)\n r += 1\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maximumUniqueSubarray(self, nums: List[int]) -> int:\n \"\"\"直接暴力法硬怼 毫无疑问超时了 因为题目给出了数据规模在 10^5 :param nums: :return:\"\"\"\n <|body_0|>\n\n def maximumUniqueSubarray2(self, nums: List[int]) -> int:\n \"\"\"滑动窗口模板 直接套就行了 :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n visited = set()\n max_res = 0\n res = 0\n for i in range(n):\n for j in range(i, n):\n if nums[j] not in visited:\n max_res += nums[j]\n visited.add(nums[j])\n else:\n res = max(max_res, res)\n max_res = 0\n visited.clear()\n break\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n counters = [0] * 10001\n cur = 0\n res = 0\n l, r = (0, 0)\n while r < n:\n cur += nums[r]\n counters[nums[r]] += 1\n while l < r and counters[nums[r]] > 1:\n cur -= nums[l]\n counters[nums[l]] -= 1\n l += 1\n res = max(res, cur)\n r += 1\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000219", "length_bytes": 1618, "license_type": "no_license", "methods": [{"docstring": "直接暴力法硬怼 毫无疑问超时了 因为题目给出了数据规模在 10^5 :param nums: :return:", "name": "maximumUniqueSubarray", "signature": "def maximumUniqueSubarray(self, nums: List[int]) -> int"}, {"docstring": "滑动窗口模板 直接套就行了 :param nums: :return:", "name": "maximumUniqueSubarray2", "signature": "def maximumUniqueSubarray2(self, nums: List[int]) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032271", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maximumUniqueSubarray(self, nums: List[int]) -> int: 直接暴力法硬怼 毫无疑问超时了 因为题目给出了数据规模在 10^5 :param nums: :return:\n- def maximumUniqueSubarray2(self, nums: List[int]) -> int: 滑动窗口模板 直接套就行了 :param nums: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maximumUniqueSubarray(self, nums: List[int]) -> int: 直接暴力法硬怼 毫无疑问超时了 因为题目给出了数据规模在 10^5 :param nums: :return:\n- def maximumUniqueSubarray2(self, nums: List[int]) -> int: 滑动窗口模板 直接套就行了 :param nums: :return:\n\n<|skeleton|>\nclass Solution:\n\n def maximumUniqueSubarray(self, nums: List[int]) -> int:\n \"\"\"直接暴力法硬怼 毫无疑问超时了 因为题目给出了数据规模在 10^5 :param nums: :return:\"\"\"\n <|body_0|>\n\n def maximumUniqueSubarray2(self, nums: List[int]) -> int:\n \"\"\"滑动窗口模板 直接套就行了 :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n visited = set()\n max_res = 0\n res = 0\n for i in range(n):\n for j in range(i, n):\n if nums[j] not in visited:\n max_res += nums[j]\n visited.add(nums[j])\n else:\n res = max(max_res, res)\n max_res = 0\n visited.clear()\n break\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n counters = [0] * 10001\n cur = 0\n res = 0\n l, r = (0, 0)\n while r < n:\n cur += nums[r]\n counters[nums[r]] += 1\n while l < r and counters[nums[r]] > 1:\n cur -= nums[l]\n counters[nums[l]] -= 1\n l += 1\n res = max(res, cur)\n r += 1\n return res\n<|end_body_1|>\n", "revision_id": "578cacff5851c5c2522981693c34e3c318002d30", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maximumUniqueSubarray(self, nums: List[int]) -> int:\n \"\"\"直接暴力法硬怼 毫无疑问超时了 因为题目给出了数据规模在 10^5 :param nums: :return:\"\"\"\n <|body_0|>\n\n def maximumUniqueSubarray2(self, nums: List[int]) -> int:\n \"\"\"滑动窗口模板 直接套就行了 :param nums: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def maximumUniqueSubarray(self, nums: List[int]) -> int:\n \"\"\"直接暴力法硬怼 毫无疑问超时了 因为题目给出了数据规模在 10^5 :param nums: :return:\"\"\"\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n visited = set()\n max_res = 0\n res = 0\n for i in range(n):\n for j in range(i, n):\n if nums[j] not in visited:\n max_res += nums[j]\n visited.add(nums[j])\n else:\n res = max(max_res, res)\n max_res = 0\n visited.clear()\n break\n return res\n\n def maximumUniqueSubarray2(self, nums: List[int]) -> int:\n \"\"\"滑动窗口模板 直接套就行了 :param nums: :return:\"\"\"\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n counters = [0] * 10001\n cur = 0\n res = 0\n l, r = (0, 0)\n while r < n:\n cur += nums[r]\n counters[nums[r]] += 1\n while l < r and counters[nums[r]] > 1:\n cur -= nums[l]\n counters[nums[l]] -= 1\n l += 1\n res = max(res, cur)\n r += 1\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "LeetCode周赛/2020-12-20第220场周赛/删除子数组的最大得分.py", "source_repo": "cjrzs/MyLeetCode", "split": "test", "star_events_count": 8} {"blob_id": "a468824c0dfd33950fe7cdd1205dafee7f1e318a", "bodies": ["columns = 'id,entry name,genes(PREFERRED),protein names,sequence,length,mass,ec,database(GeneID),reviewed,organism-id,database(KO),genes(ALTERNATIVE),genes(ORF),genes(OLN),database(EMBL),database(RefSeq),database(KEGG)'\nsuffix = 'query={}&sort=score&columns={}format={}&include={}&compress={}&limit={}&offset={}'.format(query, columns, 'html', include, compress, limit, offset)\nurl = api + suffix\nresponse = requests.get(url)\nself.soup = BeautifulSoup(response.content, 'html.parser')", "rx = re.compile('.*dbget-bin.*')\nresult = self.soup.find_all(href=rx)\nif result != []:\n return result[0].get_text()\nelse:\n return None", "result = self.soup.find_all(class_='basket-item namespace-uniprot')\nif result != []:\n return result[0]['id'].split('_')[1]\nelse:\n return None", "result = []\ntmp = self.soup.find_all(class_='protein_names')\nif tmp != []:\n result.append(tmp[0].div['title'])\n alt_names = tmp[0].div.next_sibling.get_text()\n atl_name_split = alt_names.strip().split(') (')\n for x in atl_name_split:\n for y in x.split(','):\n result.append(y.strip())\n return result\nelse:\n return None"], "bodies_text": "<|body_start_0|>\n columns = 'id,entry name,genes(PREFERRED),protein names,sequence,length,mass,ec,database(GeneID),reviewed,organism-id,database(KO),genes(ALTERNATIVE),genes(ORF),genes(OLN),database(EMBL),database(RefSeq),database(KEGG)'\n suffix = 'query={}&sort=score&columns={}format={}&include={}&compress={}&limit={}&offset={}'.format(query, columns, 'html', include, compress, limit, offset)\n url = api + suffix\n response = requests.get(url)\n self.soup = BeautifulSoup(response.content, 'html.parser')\n<|end_body_0|>\n\n<|body_start_1|>\n rx = re.compile('.*dbget-bin.*')\n result = self.soup.find_all(href=rx)\n if result != []:\n return result[0].get_text()\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n result = self.soup.find_all(class_='basket-item namespace-uniprot')\n if result != []:\n return result[0]['id'].split('_')[1]\n else:\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n result = []\n tmp = self.soup.find_all(class_='protein_names')\n if tmp != []:\n result.append(tmp[0].div['title'])\n alt_names = tmp[0].div.next_sibling.get_text()\n atl_name_split = alt_names.strip().split(') (')\n for x in atl_name_split:\n for y in x.split(','):\n result.append(y.strip())\n return result\n else:\n return None\n<|end_body_3|>\n", "class_docstring": "", "class_name": "QueryUniprotOrg", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QueryUniprotOrg:\n\n def __init__(self, query, api='https://www.uniprot.org/uniprot/?', include='yes', compress='no', limit=1, offset=0):\n \"\"\"Init Args: query (:obj:`str`): query message. url (:obj:`int`, optional): API url. include (:obj:`str`, optional): See description in link. Defaults to 'yes'. compress (:obj:`str`, optional): Return results gzipped. Defaults to 'no'. limit (:obj:`int`, optional): Max number of results to return. Defaults to 1. offset (:obj:`int`, optional): Offset of the first result. Defaults to 0.\"\"\"\n <|body_0|>\n\n def get_kegg_ortholog(self):\n \"\"\"Get kegg ortholog information using query message. Return: (:obj:`str`): kegg ortholog number\"\"\"\n <|body_1|>\n\n def get_uniprot_id(self):\n \"\"\"Get uniprot id. Return: (:obj:`str`): uniprot id\"\"\"\n <|body_2|>\n\n def get_protein_name(self):\n \"\"\"Get protein name. Return: (:obj:`list` of :obj:`str`): list of protein names.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n columns = 'id,entry name,genes(PREFERRED),protein names,sequence,length,mass,ec,database(GeneID),reviewed,organism-id,database(KO),genes(ALTERNATIVE),genes(ORF),genes(OLN),database(EMBL),database(RefSeq),database(KEGG)'\n suffix = 'query={}&sort=score&columns={}format={}&include={}&compress={}&limit={}&offset={}'.format(query, columns, 'html', include, compress, limit, offset)\n url = api + suffix\n response = requests.get(url)\n self.soup = BeautifulSoup(response.content, 'html.parser')\n<|end_body_0|>\n\n<|body_start_1|>\n rx = re.compile('.*dbget-bin.*')\n result = self.soup.find_all(href=rx)\n if result != []:\n return result[0].get_text()\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n result = self.soup.find_all(class_='basket-item namespace-uniprot')\n if result != []:\n return result[0]['id'].split('_')[1]\n else:\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n result = []\n tmp = self.soup.find_all(class_='protein_names')\n if tmp != []:\n result.append(tmp[0].div['title'])\n alt_names = tmp[0].div.next_sibling.get_text()\n atl_name_split = alt_names.strip().split(') (')\n for x in atl_name_split:\n for y in x.split(','):\n result.append(y.strip())\n return result\n else:\n return None\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000220", "length_bytes": 2634, "license_type": "permissive", "methods": [{"docstring": "Init Args: query (:obj:`str`): query message. url (:obj:`int`, optional): API url. include (:obj:`str`, optional): See description in link. Defaults to 'yes'. compress (:obj:`str`, optional): Return results gzipped. Defaults to 'no'. limit (:obj:`int`, optional): Max number of results to return. Defaults to 1. offset (:obj:`int`, optional): Offset of the first result. Defaults to 0.", "name": "__init__", "signature": "def __init__(self, query, api='https://www.uniprot.org/uniprot/?', include='yes', compress='no', limit=1, offset=0)"}, {"docstring": "Get kegg ortholog information using query message. Return: (:obj:`str`): kegg ortholog number", "name": "get_kegg_ortholog", "signature": "def get_kegg_ortholog(self)"}, {"docstring": "Get uniprot id. Return: (:obj:`str`): uniprot id", "name": "get_uniprot_id", "signature": "def get_uniprot_id(self)"}, {"docstring": "Get protein name. Return: (:obj:`list` of :obj:`str`): list of protein names.", "name": "get_protein_name", "signature": "def get_protein_name(self)"}], "n_methods": 4, "prompt": "Implement the Python class `QueryUniprotOrg` described below.\n\nClass description:\nImplement the QueryUniprotOrg class.\n\nMethod signatures and docstrings:\n- def __init__(self, query, api='https://www.uniprot.org/uniprot/?', include='yes', compress='no', limit=1, offset=0): Init Args: query (:obj:`str`): query message. url (:obj:`int`, optional): API url. include (:obj:`str`, optional): See description in link. Defaults to 'yes'. compress (:obj:`str`, optional): Return results gzipped. Defaults to 'no'. limit (:obj:`int`, optional): Max number of results to return. Defaults to 1. offset (:obj:`int`, optional): Offset of the first result. Defaults to 0.\n- def get_kegg_ortholog(self): Get kegg ortholog information using query message. Return: (:obj:`str`): kegg ortholog number\n- def get_uniprot_id(self): Get uniprot id. Return: (:obj:`str`): uniprot id\n- def get_protein_name(self): Get protein name. Return: (:obj:`list` of :obj:`str`): list of protein names.", "prompted_full_text": "Implement the Python class `QueryUniprotOrg` described below.\n\nClass description:\nImplement the QueryUniprotOrg class.\n\nMethod signatures and docstrings:\n- def __init__(self, query, api='https://www.uniprot.org/uniprot/?', include='yes', compress='no', limit=1, offset=0): Init Args: query (:obj:`str`): query message. url (:obj:`int`, optional): API url. include (:obj:`str`, optional): See description in link. Defaults to 'yes'. compress (:obj:`str`, optional): Return results gzipped. Defaults to 'no'. limit (:obj:`int`, optional): Max number of results to return. Defaults to 1. offset (:obj:`int`, optional): Offset of the first result. Defaults to 0.\n- def get_kegg_ortholog(self): Get kegg ortholog information using query message. Return: (:obj:`str`): kegg ortholog number\n- def get_uniprot_id(self): Get uniprot id. Return: (:obj:`str`): uniprot id\n- def get_protein_name(self): Get protein name. Return: (:obj:`list` of :obj:`str`): list of protein names.\n\n<|skeleton|>\nclass QueryUniprotOrg:\n\n def __init__(self, query, api='https://www.uniprot.org/uniprot/?', include='yes', compress='no', limit=1, offset=0):\n \"\"\"Init Args: query (:obj:`str`): query message. url (:obj:`int`, optional): API url. include (:obj:`str`, optional): See description in link. Defaults to 'yes'. compress (:obj:`str`, optional): Return results gzipped. Defaults to 'no'. limit (:obj:`int`, optional): Max number of results to return. Defaults to 1. offset (:obj:`int`, optional): Offset of the first result. Defaults to 0.\"\"\"\n <|body_0|>\n\n def get_kegg_ortholog(self):\n \"\"\"Get kegg ortholog information using query message. Return: (:obj:`str`): kegg ortholog number\"\"\"\n <|body_1|>\n\n def get_uniprot_id(self):\n \"\"\"Get uniprot id. Return: (:obj:`str`): uniprot id\"\"\"\n <|body_2|>\n\n def get_protein_name(self):\n \"\"\"Get protein name. Return: (:obj:`list` of :obj:`str`): list of protein names.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n columns = 'id,entry name,genes(PREFERRED),protein names,sequence,length,mass,ec,database(GeneID),reviewed,organism-id,database(KO),genes(ALTERNATIVE),genes(ORF),genes(OLN),database(EMBL),database(RefSeq),database(KEGG)'\n suffix = 'query={}&sort=score&columns={}format={}&include={}&compress={}&limit={}&offset={}'.format(query, columns, 'html', include, compress, limit, offset)\n url = api + suffix\n response = requests.get(url)\n self.soup = BeautifulSoup(response.content, 'html.parser')\n<|end_body_0|>\n\n<|body_start_1|>\n rx = re.compile('.*dbget-bin.*')\n result = self.soup.find_all(href=rx)\n if result != []:\n return result[0].get_text()\n else:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n result = self.soup.find_all(class_='basket-item namespace-uniprot')\n if result != []:\n return result[0]['id'].split('_')[1]\n else:\n return None\n<|end_body_2|>\n\n<|body_start_3|>\n result = []\n tmp = self.soup.find_all(class_='protein_names')\n if tmp != []:\n result.append(tmp[0].div['title'])\n alt_names = tmp[0].div.next_sibling.get_text()\n atl_name_split = alt_names.strip().split(') (')\n for x in atl_name_split:\n for y in x.split(','):\n result.append(y.strip())\n return result\n else:\n return None\n<|end_body_3|>\n", "revision_id": "4190a08cbe518ba37f4a44abe5f79961c54bc15b", "skeleton": "<|skeleton|>\nclass QueryUniprotOrg:\n\n def __init__(self, query, api='https://www.uniprot.org/uniprot/?', include='yes', compress='no', limit=1, offset=0):\n \"\"\"Init Args: query (:obj:`str`): query message. url (:obj:`int`, optional): API url. include (:obj:`str`, optional): See description in link. Defaults to 'yes'. compress (:obj:`str`, optional): Return results gzipped. Defaults to 'no'. limit (:obj:`int`, optional): Max number of results to return. Defaults to 1. offset (:obj:`int`, optional): Offset of the first result. Defaults to 0.\"\"\"\n <|body_0|>\n\n def get_kegg_ortholog(self):\n \"\"\"Get kegg ortholog information using query message. Return: (:obj:`str`): kegg ortholog number\"\"\"\n <|body_1|>\n\n def get_uniprot_id(self):\n \"\"\"Get uniprot id. Return: (:obj:`str`): uniprot id\"\"\"\n <|body_2|>\n\n def get_protein_name(self):\n \"\"\"Get protein name. Return: (:obj:`list` of :obj:`str`): list of protein names.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QueryUniprotOrg:\n def __init__(self, query, api='https://www.uniprot.org/uniprot/?', include='yes', compress='no', limit=1, offset=0):\n \"\"\"Init Args: query (:obj:`str`): query message. url (:obj:`int`, optional): API url. include (:obj:`str`, optional): See description in link. Defaults to 'yes'. compress (:obj:`str`, optional): Return results gzipped. Defaults to 'no'. limit (:obj:`int`, optional): Max number of results to return. Defaults to 1. offset (:obj:`int`, optional): Offset of the first result. Defaults to 0.\"\"\"\n columns = 'id,entry name,genes(PREFERRED),protein names,sequence,length,mass,ec,database(GeneID),reviewed,organism-id,database(KO),genes(ALTERNATIVE),genes(ORF),genes(OLN),database(EMBL),database(RefSeq),database(KEGG)'\n suffix = 'query={}&sort=score&columns={}format={}&include={}&compress={}&limit={}&offset={}'.format(query, columns, 'html', include, compress, limit, offset)\n url = api + suffix\n response = requests.get(url)\n self.soup = BeautifulSoup(response.content, 'html.parser')\n\n def get_kegg_ortholog(self):\n \"\"\"Get kegg ortholog information using query message. Return: (:obj:`str`): kegg ortholog number\"\"\"\n rx = re.compile('.*dbget-bin.*')\n result = self.soup.find_all(href=rx)\n if result != []:\n return result[0].get_text()\n else:\n return None\n\n def get_uniprot_id(self):\n \"\"\"Get uniprot id. Return: (:obj:`str`): uniprot id\"\"\"\n result = self.soup.find_all(class_='basket-item namespace-uniprot')\n if result != []:\n return result[0]['id'].split('_')[1]\n else:\n return None\n\n def get_protein_name(self):\n \"\"\"Get protein name. Return: (:obj:`list` of :obj:`str`): list of protein names.\"\"\"\n result = []\n tmp = self.soup.find_all(class_='protein_names')\n if tmp != []:\n result.append(tmp[0].div['title'])\n alt_names = tmp[0].div.next_sibling.get_text()\n atl_name_split = alt_names.strip().split(') (')\n for x in atl_name_split:\n for y in x.split(','):\n result.append(y.strip())\n return result\n else:\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "datanator_query_python/query/query_uniprot_org.py", "source_repo": "KarrLab/datanator_query_python", "split": "test", "star_events_count": 0} {"blob_id": "62b33bf62ce4d87ded30504248ac66f7871778a0", "bodies": ["if namespace is None:\n self.use_main_ns = 1\nelse:\n self.use_main_ns = 0\n self.namespace = namespace\nif global_namespace is None:\n self.global_namespace = {}\nelse:\n self.global_namespace = global_namespace", "if self.use_main_ns:\n raise RuntimeError('Namespace must be provided!')\nif '.' in text:\n return self.attr_matches(text)\nelse:\n return self.global_matches(text)", "def get_item(obj, attr):\n return obj[attr]\na = {}\nfiltered_builtin = {}\nfor key, val in dict_iter_items(__builtin__.__dict__):\n if not (val is True or val is False or val is None):\n filtered_builtin[key] = val\nfor dict_with_comps in [filtered_builtin, self.namespace, self.global_namespace]:\n a.update(dict_with_comps)\nfilter = _StartsWithFilter(text)\nreturn dir2(a, a.keys(), get_item, filter)", "import re\nm = re.match('(\\\\S+(\\\\.\\\\w+)*)\\\\.(\\\\w*)$', text)\nif not m:\n return []\nexpr, attr = m.group(1, 3)\ntry:\n obj = eval(expr, self.namespace)\nexcept:\n try:\n obj = eval(expr, self.global_namespace)\n except:\n return []\nfilter = _StartsWithFilter(attr)\nwords = dir2(obj, filter=filter)\nreturn words"], "bodies_text": "<|body_start_0|>\n if namespace is None:\n self.use_main_ns = 1\n else:\n self.use_main_ns = 0\n self.namespace = namespace\n if global_namespace is None:\n self.global_namespace = {}\n else:\n self.global_namespace = global_namespace\n<|end_body_0|>\n\n<|body_start_1|>\n if self.use_main_ns:\n raise RuntimeError('Namespace must be provided!')\n if '.' in text:\n return self.attr_matches(text)\n else:\n return self.global_matches(text)\n<|end_body_1|>\n\n<|body_start_2|>\n def get_item(obj, attr):\n return obj[attr]\n a = {}\n filtered_builtin = {}\n for key, val in dict_iter_items(__builtin__.__dict__):\n if not (val is True or val is False or val is None):\n filtered_builtin[key] = val\n for dict_with_comps in [filtered_builtin, self.namespace, self.global_namespace]:\n a.update(dict_with_comps)\n filter = _StartsWithFilter(text)\n return dir2(a, a.keys(), get_item, filter)\n<|end_body_2|>\n\n<|body_start_3|>\n import re\n m = re.match('(\\\\S+(\\\\.\\\\w+)*)\\\\.(\\\\w*)$', text)\n if not m:\n return []\n expr, attr = m.group(1, 3)\n try:\n obj = eval(expr, self.namespace)\n except:\n try:\n obj = eval(expr, self.global_namespace)\n except:\n return []\n filter = _StartsWithFilter(attr)\n words = dir2(obj, filter=filter)\n return words\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Completer", "detected_licenses": ["EPL-1.0", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Completer:\n\n def __init__(self, namespace=None, global_namespace=None):\n \"\"\"Create a new completer for the command line. Completer([namespace,global_namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. An optional second namespace can be given. This allows the completer to handle cases where both the local and global scopes need to be distinguished. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)\"\"\"\n <|body_0|>\n\n def complete(self, text):\n \"\"\"Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.\"\"\"\n <|body_1|>\n\n def global_matches(self, text):\n \"\"\"Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.\"\"\"\n <|body_2|>\n\n def attr_matches(self, text):\n \"\"\"Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace or self.global_namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if namespace is None:\n self.use_main_ns = 1\n else:\n self.use_main_ns = 0\n self.namespace = namespace\n if global_namespace is None:\n self.global_namespace = {}\n else:\n self.global_namespace = global_namespace\n<|end_body_0|>\n\n<|body_start_1|>\n if self.use_main_ns:\n raise RuntimeError('Namespace must be provided!')\n if '.' in text:\n return self.attr_matches(text)\n else:\n return self.global_matches(text)\n<|end_body_1|>\n\n<|body_start_2|>\n def get_item(obj, attr):\n return obj[attr]\n a = {}\n filtered_builtin = {}\n for key, val in dict_iter_items(__builtin__.__dict__):\n if not (val is True or val is False or val is None):\n filtered_builtin[key] = val\n for dict_with_comps in [filtered_builtin, self.namespace, self.global_namespace]:\n a.update(dict_with_comps)\n filter = _StartsWithFilter(text)\n return dir2(a, a.keys(), get_item, filter)\n<|end_body_2|>\n\n<|body_start_3|>\n import re\n m = re.match('(\\\\S+(\\\\.\\\\w+)*)\\\\.(\\\\w*)$', text)\n if not m:\n return []\n expr, attr = m.group(1, 3)\n try:\n obj = eval(expr, self.namespace)\n except:\n try:\n obj = eval(expr, self.global_namespace)\n except:\n return []\n filter = _StartsWithFilter(attr)\n words = dir2(obj, filter=filter)\n return words\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000221", "length_bytes": 6762, "license_type": "permissive", "methods": [{"docstring": "Create a new completer for the command line. Completer([namespace,global_namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. An optional second namespace can be given. This allows the completer to handle cases where both the local and global scopes need to be distinguished. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)", "name": "__init__", "signature": "def __init__(self, namespace=None, global_namespace=None)"}, {"docstring": "Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.", "name": "complete", "signature": "def complete(self, text)"}, {"docstring": "Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.", "name": "global_matches", "signature": "def global_matches(self, text)"}, {"docstring": "Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace or self.global_namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.", "name": "attr_matches", "signature": "def attr_matches(self, text)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_015713", "prompt": "Implement the Python class `Completer` described below.\n\nClass description:\nImplement the Completer class.\n\nMethod signatures and docstrings:\n- def __init__(self, namespace=None, global_namespace=None): Create a new completer for the command line. Completer([namespace,global_namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. An optional second namespace can be given. This allows the completer to handle cases where both the local and global scopes need to be distinguished. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)\n- def complete(self, text): Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.\n- def global_matches(self, text): Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.\n- def attr_matches(self, text): Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace or self.global_namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.", "prompted_full_text": "Implement the Python class `Completer` described below.\n\nClass description:\nImplement the Completer class.\n\nMethod signatures and docstrings:\n- def __init__(self, namespace=None, global_namespace=None): Create a new completer for the command line. Completer([namespace,global_namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. An optional second namespace can be given. This allows the completer to handle cases where both the local and global scopes need to be distinguished. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)\n- def complete(self, text): Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.\n- def global_matches(self, text): Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.\n- def attr_matches(self, text): Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace or self.global_namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.\n\n<|skeleton|>\nclass Completer:\n\n def __init__(self, namespace=None, global_namespace=None):\n \"\"\"Create a new completer for the command line. Completer([namespace,global_namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. An optional second namespace can be given. This allows the completer to handle cases where both the local and global scopes need to be distinguished. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)\"\"\"\n <|body_0|>\n\n def complete(self, text):\n \"\"\"Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.\"\"\"\n <|body_1|>\n\n def global_matches(self, text):\n \"\"\"Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.\"\"\"\n <|body_2|>\n\n def attr_matches(self, text):\n \"\"\"Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace or self.global_namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if namespace is None:\n self.use_main_ns = 1\n else:\n self.use_main_ns = 0\n self.namespace = namespace\n if global_namespace is None:\n self.global_namespace = {}\n else:\n self.global_namespace = global_namespace\n<|end_body_0|>\n\n<|body_start_1|>\n if self.use_main_ns:\n raise RuntimeError('Namespace must be provided!')\n if '.' in text:\n return self.attr_matches(text)\n else:\n return self.global_matches(text)\n<|end_body_1|>\n\n<|body_start_2|>\n def get_item(obj, attr):\n return obj[attr]\n a = {}\n filtered_builtin = {}\n for key, val in dict_iter_items(__builtin__.__dict__):\n if not (val is True or val is False or val is None):\n filtered_builtin[key] = val\n for dict_with_comps in [filtered_builtin, self.namespace, self.global_namespace]:\n a.update(dict_with_comps)\n filter = _StartsWithFilter(text)\n return dir2(a, a.keys(), get_item, filter)\n<|end_body_2|>\n\n<|body_start_3|>\n import re\n m = re.match('(\\\\S+(\\\\.\\\\w+)*)\\\\.(\\\\w*)$', text)\n if not m:\n return []\n expr, attr = m.group(1, 3)\n try:\n obj = eval(expr, self.namespace)\n except:\n try:\n obj = eval(expr, self.global_namespace)\n except:\n return []\n filter = _StartsWithFilter(attr)\n words = dir2(obj, filter=filter)\n return words\n<|end_body_3|>\n", "revision_id": "05dbd4575d01a213f3f4d69aa4968473f2536142", "skeleton": "<|skeleton|>\nclass Completer:\n\n def __init__(self, namespace=None, global_namespace=None):\n \"\"\"Create a new completer for the command line. Completer([namespace,global_namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. An optional second namespace can be given. This allows the completer to handle cases where both the local and global scopes need to be distinguished. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)\"\"\"\n <|body_0|>\n\n def complete(self, text):\n \"\"\"Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.\"\"\"\n <|body_1|>\n\n def global_matches(self, text):\n \"\"\"Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.\"\"\"\n <|body_2|>\n\n def attr_matches(self, text):\n \"\"\"Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace or self.global_namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Completer:\n def __init__(self, namespace=None, global_namespace=None):\n \"\"\"Create a new completer for the command line. Completer([namespace,global_namespace]) -> completer instance. If unspecified, the default namespace where completions are performed is __main__ (technically, __main__.__dict__). Namespaces should be given as dictionaries. An optional second namespace can be given. This allows the completer to handle cases where both the local and global scopes need to be distinguished. Completer instances should be used as the completion mechanism of readline via the set_completer() call: readline.set_completer(Completer(my_namespace).complete)\"\"\"\n if namespace is None:\n self.use_main_ns = 1\n else:\n self.use_main_ns = 0\n self.namespace = namespace\n if global_namespace is None:\n self.global_namespace = {}\n else:\n self.global_namespace = global_namespace\n\n def complete(self, text):\n \"\"\"Return the next possible completion for 'text'. This is called successively with state == 0, 1, 2, ... until it returns None. The completion should begin with 'text'.\"\"\"\n if self.use_main_ns:\n raise RuntimeError('Namespace must be provided!')\n if '.' in text:\n return self.attr_matches(text)\n else:\n return self.global_matches(text)\n\n def global_matches(self, text):\n \"\"\"Compute matches when text is a simple name. Return a list of all keywords, built-in functions and names currently defined in self.namespace or self.global_namespace that match.\"\"\"\n def get_item(obj, attr):\n return obj[attr]\n a = {}\n filtered_builtin = {}\n for key, val in dict_iter_items(__builtin__.__dict__):\n if not (val is True or val is False or val is None):\n filtered_builtin[key] = val\n for dict_with_comps in [filtered_builtin, self.namespace, self.global_namespace]:\n a.update(dict_with_comps)\n filter = _StartsWithFilter(text)\n return dir2(a, a.keys(), get_item, filter)\n\n def attr_matches(self, text):\n \"\"\"Compute matches when text contains a dot. Assuming the text is of the form NAME.NAME....[NAME], and is evaluatable in self.namespace or self.global_namespace, it will be evaluated and its attributes (as revealed by dir()) are used as possible completions. (For class instances, class members are are also considered.) WARNING: this can still invoke arbitrary C code, if an object with a __getattr__ hook is evaluated.\"\"\"\n import re\n m = re.match('(\\\\S+(\\\\.\\\\w+)*)\\\\.(\\\\w*)$', text)\n if not m:\n return []\n expr, attr = m.group(1, 3)\n try:\n obj = eval(expr, self.namespace)\n except:\n try:\n obj = eval(expr, self.global_namespace)\n except:\n return []\n filter = _StartsWithFilter(attr)\n words = dir2(obj, filter=filter)\n return words\n", "source": "the_stack_v2_python_sparse", "source_path": "python/helpers/pydev/_pydev_bundle/_pydev_completer.py", "source_repo": "JetBrains/intellij-community", "split": "test", "star_events_count": 16288} {"blob_id": "020cd56919ca9b89d12dd56b05add8f5ef7ebd93", "bodies": ["self.capabilities = capabilities\nself.concurrency = concurrency\nself.host_type = host_type\nself.hosts = hosts\nself.local_mount_dir = local_mount_dir\nself.mount_view = mount_view\nself.mounts = mounts\nself.preferred_control_nodes = preferred_control_nodes\nself.restore_args = restore_args\nself.restore_job_arguments_map = restore_job_arguments_map\nself.run_start_time_usecs = run_start_time_usecs\nself.script_dir = script_dir\nself.source_args = source_args\nself.source_arguments_map = source_arguments_map\nself.source_type = source_type\nself.uda_s3_view_backup_properties = uda_s3_view_backup_properties\nself.use_s3_view = use_s3_view", "if dictionary is None:\n return None\ncapabilities = cohesity_management_sdk.models.uda_source_capabilities.UdaSourceCapabilities.from_dictionary(dictionary.get('capabilities')) if dictionary.get('capabilities') else None\nconcurrency = dictionary.get('concurrency')\nhost_type = dictionary.get('hostType')\nhosts = dictionary.get('hosts')\nlocal_mount_dir = dictionary.get('localMountDir')\nmount_view = dictionary.get('mountView')\nmounts = dictionary.get('mounts')\npreferred_control_nodes = dictionary.get('preferredControlNodes')\nrestore_args = dictionary.get('restoreArgs')\nrestore_job_arguments_map = None\nif dictionary.get('restoreJobArgumentsMap') != None:\n restore_job_arguments_map = list()\n for structure in dictionary.get('restoreJobArgumentsMap'):\n restore_job_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_restore_job_arguments_map_entry.UdaRecoverJobParams_RestoreJobArgumentsMapEntry.from_dictionary(structure))\nrun_start_time_usecs = dictionary.get('runStartTimeUsecs')\nscript_dir = dictionary.get('scriptDir')\nsource_args = dictionary.get('sourceArgs')\nsource_arguments_map = None\nif dictionary.get('sourceArgumentsMap') != None:\n source_arguments_map = list()\n for structure in dictionary.get('sourceArgumentsMap'):\n source_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_source_arguments_map_entry.UdaRecoverJobParams_SourceArgumentsMapEntry.from_dictionary(structure))\nsource_type = dictionary.get('sourceType')\nuda_s3_view_backup_properties = cohesity_management_sdk.models.uda_s3_view_backup_properties.UdaS3ViewBackupProperties.from_dictionary(dictionary.get('udaS3ViewBackupProperties')) if dictionary.get('udaS3ViewBackupProperties') else None\nuse_s3_view = dictionary.get('useS3View')\nreturn cls(capabilities, concurrency, host_type, hosts, local_mount_dir, mount_view, mounts, preferred_control_nodes, restore_args, restore_job_arguments_map, run_start_time_usecs, script_dir, source_args, source_arguments_map, source_type, uda_s3_view_backup_properties, use_s3_view)"], "bodies_text": "<|body_start_0|>\n self.capabilities = capabilities\n self.concurrency = concurrency\n self.host_type = host_type\n self.hosts = hosts\n self.local_mount_dir = local_mount_dir\n self.mount_view = mount_view\n self.mounts = mounts\n self.preferred_control_nodes = preferred_control_nodes\n self.restore_args = restore_args\n self.restore_job_arguments_map = restore_job_arguments_map\n self.run_start_time_usecs = run_start_time_usecs\n self.script_dir = script_dir\n self.source_args = source_args\n self.source_arguments_map = source_arguments_map\n self.source_type = source_type\n self.uda_s3_view_backup_properties = uda_s3_view_backup_properties\n self.use_s3_view = use_s3_view\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n capabilities = cohesity_management_sdk.models.uda_source_capabilities.UdaSourceCapabilities.from_dictionary(dictionary.get('capabilities')) if dictionary.get('capabilities') else None\n concurrency = dictionary.get('concurrency')\n host_type = dictionary.get('hostType')\n hosts = dictionary.get('hosts')\n local_mount_dir = dictionary.get('localMountDir')\n mount_view = dictionary.get('mountView')\n mounts = dictionary.get('mounts')\n preferred_control_nodes = dictionary.get('preferredControlNodes')\n restore_args = dictionary.get('restoreArgs')\n restore_job_arguments_map = None\n if dictionary.get('restoreJobArgumentsMap') != None:\n restore_job_arguments_map = list()\n for structure in dictionary.get('restoreJobArgumentsMap'):\n restore_job_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_restore_job_arguments_map_entry.UdaRecoverJobParams_RestoreJobArgumentsMapEntry.from_dictionary(structure))\n run_start_time_usecs = dictionary.get('runStartTimeUsecs')\n script_dir = dictionary.get('scriptDir')\n source_args = dictionary.get('sourceArgs')\n source_arguments_map = None\n if dictionary.get('sourceArgumentsMap') != None:\n source_arguments_map = list()\n for structure in dictionary.get('sourceArgumentsMap'):\n source_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_source_arguments_map_entry.UdaRecoverJobParams_SourceArgumentsMapEntry.from_dictionary(structure))\n source_type = dictionary.get('sourceType')\n uda_s3_view_backup_properties = cohesity_management_sdk.models.uda_s3_view_backup_properties.UdaS3ViewBackupProperties.from_dictionary(dictionary.get('udaS3ViewBackupProperties')) if dictionary.get('udaS3ViewBackupProperties') else None\n use_s3_view = dictionary.get('useS3View')\n return cls(capabilities, concurrency, host_type, hosts, local_mount_dir, mount_view, mounts, preferred_control_nodes, restore_args, restore_job_arguments_map, run_start_time_usecs, script_dir, source_args, source_arguments_map, source_type, uda_s3_view_backup_properties, use_s3_view)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'UdaRecoverJobParams' model. TODO: type description here. Attributes: capabilities (UdaSourceCapabilities): Types of backups supported. concurrency (int): Number of parallel streams to use for the restore. host_type (int): The agent host environment type. hosts (list of string): List of hosts forming the UDA cluster. local_mount_dir (string): Directory on the host where views will be mounted. (This is deprecated now and the value is derived from a gflag in agent.) mount_view (bool): Whether to mount a view during restore. mounts (int): Max number of view mounts to use for the restore. preferred_control_nodes (list of string): Control nodes to connect for control path op", "class_name": "UdaRecoverJobParams", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UdaRecoverJobParams:\n \"\"\"Implementation of the 'UdaRecoverJobParams' model. TODO: type description here. Attributes: capabilities (UdaSourceCapabilities): Types of backups supported. concurrency (int): Number of parallel streams to use for the restore. host_type (int): The agent host environment type. hosts (list of string): List of hosts forming the UDA cluster. local_mount_dir (string): Directory on the host where views will be mounted. (This is deprecated now and the value is derived from a gflag in agent.) mount_view (bool): Whether to mount a view during restore. mounts (int): Max number of view mounts to use for the restore. preferred_control_nodes (list of string): Control nodes to connect for control path op\"\"\"\n\n def __init__(self, capabilities=None, concurrency=None, host_type=None, hosts=None, local_mount_dir=None, mount_view=None, mounts=None, preferred_control_nodes=None, restore_args=None, restore_job_arguments_map=None, run_start_time_usecs=None, script_dir=None, source_args=None, source_arguments_map=None, source_type=None, uda_s3_view_backup_properties=None, use_s3_view=None):\n \"\"\"Constructor for the UdaRecoverJobParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capabilities = capabilities\n self.concurrency = concurrency\n self.host_type = host_type\n self.hosts = hosts\n self.local_mount_dir = local_mount_dir\n self.mount_view = mount_view\n self.mounts = mounts\n self.preferred_control_nodes = preferred_control_nodes\n self.restore_args = restore_args\n self.restore_job_arguments_map = restore_job_arguments_map\n self.run_start_time_usecs = run_start_time_usecs\n self.script_dir = script_dir\n self.source_args = source_args\n self.source_arguments_map = source_arguments_map\n self.source_type = source_type\n self.uda_s3_view_backup_properties = uda_s3_view_backup_properties\n self.use_s3_view = use_s3_view\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n capabilities = cohesity_management_sdk.models.uda_source_capabilities.UdaSourceCapabilities.from_dictionary(dictionary.get('capabilities')) if dictionary.get('capabilities') else None\n concurrency = dictionary.get('concurrency')\n host_type = dictionary.get('hostType')\n hosts = dictionary.get('hosts')\n local_mount_dir = dictionary.get('localMountDir')\n mount_view = dictionary.get('mountView')\n mounts = dictionary.get('mounts')\n preferred_control_nodes = dictionary.get('preferredControlNodes')\n restore_args = dictionary.get('restoreArgs')\n restore_job_arguments_map = None\n if dictionary.get('restoreJobArgumentsMap') != None:\n restore_job_arguments_map = list()\n for structure in dictionary.get('restoreJobArgumentsMap'):\n restore_job_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_restore_job_arguments_map_entry.UdaRecoverJobParams_RestoreJobArgumentsMapEntry.from_dictionary(structure))\n run_start_time_usecs = dictionary.get('runStartTimeUsecs')\n script_dir = dictionary.get('scriptDir')\n source_args = dictionary.get('sourceArgs')\n source_arguments_map = None\n if dictionary.get('sourceArgumentsMap') != None:\n source_arguments_map = list()\n for structure in dictionary.get('sourceArgumentsMap'):\n source_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_source_arguments_map_entry.UdaRecoverJobParams_SourceArgumentsMapEntry.from_dictionary(structure))\n source_type = dictionary.get('sourceType')\n uda_s3_view_backup_properties = cohesity_management_sdk.models.uda_s3_view_backup_properties.UdaS3ViewBackupProperties.from_dictionary(dictionary.get('udaS3ViewBackupProperties')) if dictionary.get('udaS3ViewBackupProperties') else None\n use_s3_view = dictionary.get('useS3View')\n return cls(capabilities, concurrency, host_type, hosts, local_mount_dir, mount_view, mounts, preferred_control_nodes, restore_args, restore_job_arguments_map, run_start_time_usecs, script_dir, source_args, source_arguments_map, source_type, uda_s3_view_backup_properties, use_s3_view)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000222", "length_bytes": 7887, "license_type": "permissive", "methods": [{"docstring": "Constructor for the UdaRecoverJobParams class", "name": "__init__", "signature": "def __init__(self, capabilities=None, concurrency=None, host_type=None, hosts=None, local_mount_dir=None, mount_view=None, mounts=None, preferred_control_nodes=None, restore_args=None, restore_job_arguments_map=None, run_start_time_usecs=None, script_dir=None, source_args=None, source_arguments_map=None, source_type=None, uda_s3_view_backup_properties=None, use_s3_view=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052923", "prompt": "Implement the Python class `UdaRecoverJobParams` described below.\n\nClass description:\nImplementation of the 'UdaRecoverJobParams' model. TODO: type description here. Attributes: capabilities (UdaSourceCapabilities): Types of backups supported. concurrency (int): Number of parallel streams to use for the restore. host_type (int): The agent host environment type. hosts (list of string): List of hosts forming the UDA cluster. local_mount_dir (string): Directory on the host where views will be mounted. (This is deprecated now and the value is derived from a gflag in agent.) mount_view (bool): Whether to mount a view during restore. mounts (int): Max number of view mounts to use for the restore. preferred_control_nodes (list of string): Control nodes to connect for control path op\n\nMethod signatures and docstrings:\n- def __init__(self, capabilities=None, concurrency=None, host_type=None, hosts=None, local_mount_dir=None, mount_view=None, mounts=None, preferred_control_nodes=None, restore_args=None, restore_job_arguments_map=None, run_start_time_usecs=None, script_dir=None, source_args=None, source_arguments_map=None, source_type=None, uda_s3_view_backup_properties=None, use_s3_view=None): Constructor for the UdaRecoverJobParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `UdaRecoverJobParams` described below.\n\nClass description:\nImplementation of the 'UdaRecoverJobParams' model. TODO: type description here. Attributes: capabilities (UdaSourceCapabilities): Types of backups supported. concurrency (int): Number of parallel streams to use for the restore. host_type (int): The agent host environment type. hosts (list of string): List of hosts forming the UDA cluster. local_mount_dir (string): Directory on the host where views will be mounted. (This is deprecated now and the value is derived from a gflag in agent.) mount_view (bool): Whether to mount a view during restore. mounts (int): Max number of view mounts to use for the restore. preferred_control_nodes (list of string): Control nodes to connect for control path op\n\nMethod signatures and docstrings:\n- def __init__(self, capabilities=None, concurrency=None, host_type=None, hosts=None, local_mount_dir=None, mount_view=None, mounts=None, preferred_control_nodes=None, restore_args=None, restore_job_arguments_map=None, run_start_time_usecs=None, script_dir=None, source_args=None, source_arguments_map=None, source_type=None, uda_s3_view_backup_properties=None, use_s3_view=None): Constructor for the UdaRecoverJobParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass UdaRecoverJobParams:\n \"\"\"Implementation of the 'UdaRecoverJobParams' model. TODO: type description here. Attributes: capabilities (UdaSourceCapabilities): Types of backups supported. concurrency (int): Number of parallel streams to use for the restore. host_type (int): The agent host environment type. hosts (list of string): List of hosts forming the UDA cluster. local_mount_dir (string): Directory on the host where views will be mounted. (This is deprecated now and the value is derived from a gflag in agent.) mount_view (bool): Whether to mount a view during restore. mounts (int): Max number of view mounts to use for the restore. preferred_control_nodes (list of string): Control nodes to connect for control path op\"\"\"\n\n def __init__(self, capabilities=None, concurrency=None, host_type=None, hosts=None, local_mount_dir=None, mount_view=None, mounts=None, preferred_control_nodes=None, restore_args=None, restore_job_arguments_map=None, run_start_time_usecs=None, script_dir=None, source_args=None, source_arguments_map=None, source_type=None, uda_s3_view_backup_properties=None, use_s3_view=None):\n \"\"\"Constructor for the UdaRecoverJobParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capabilities = capabilities\n self.concurrency = concurrency\n self.host_type = host_type\n self.hosts = hosts\n self.local_mount_dir = local_mount_dir\n self.mount_view = mount_view\n self.mounts = mounts\n self.preferred_control_nodes = preferred_control_nodes\n self.restore_args = restore_args\n self.restore_job_arguments_map = restore_job_arguments_map\n self.run_start_time_usecs = run_start_time_usecs\n self.script_dir = script_dir\n self.source_args = source_args\n self.source_arguments_map = source_arguments_map\n self.source_type = source_type\n self.uda_s3_view_backup_properties = uda_s3_view_backup_properties\n self.use_s3_view = use_s3_view\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n capabilities = cohesity_management_sdk.models.uda_source_capabilities.UdaSourceCapabilities.from_dictionary(dictionary.get('capabilities')) if dictionary.get('capabilities') else None\n concurrency = dictionary.get('concurrency')\n host_type = dictionary.get('hostType')\n hosts = dictionary.get('hosts')\n local_mount_dir = dictionary.get('localMountDir')\n mount_view = dictionary.get('mountView')\n mounts = dictionary.get('mounts')\n preferred_control_nodes = dictionary.get('preferredControlNodes')\n restore_args = dictionary.get('restoreArgs')\n restore_job_arguments_map = None\n if dictionary.get('restoreJobArgumentsMap') != None:\n restore_job_arguments_map = list()\n for structure in dictionary.get('restoreJobArgumentsMap'):\n restore_job_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_restore_job_arguments_map_entry.UdaRecoverJobParams_RestoreJobArgumentsMapEntry.from_dictionary(structure))\n run_start_time_usecs = dictionary.get('runStartTimeUsecs')\n script_dir = dictionary.get('scriptDir')\n source_args = dictionary.get('sourceArgs')\n source_arguments_map = None\n if dictionary.get('sourceArgumentsMap') != None:\n source_arguments_map = list()\n for structure in dictionary.get('sourceArgumentsMap'):\n source_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_source_arguments_map_entry.UdaRecoverJobParams_SourceArgumentsMapEntry.from_dictionary(structure))\n source_type = dictionary.get('sourceType')\n uda_s3_view_backup_properties = cohesity_management_sdk.models.uda_s3_view_backup_properties.UdaS3ViewBackupProperties.from_dictionary(dictionary.get('udaS3ViewBackupProperties')) if dictionary.get('udaS3ViewBackupProperties') else None\n use_s3_view = dictionary.get('useS3View')\n return cls(capabilities, concurrency, host_type, hosts, local_mount_dir, mount_view, mounts, preferred_control_nodes, restore_args, restore_job_arguments_map, run_start_time_usecs, script_dir, source_args, source_arguments_map, source_type, uda_s3_view_backup_properties, use_s3_view)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass UdaRecoverJobParams:\n \"\"\"Implementation of the 'UdaRecoverJobParams' model. TODO: type description here. Attributes: capabilities (UdaSourceCapabilities): Types of backups supported. concurrency (int): Number of parallel streams to use for the restore. host_type (int): The agent host environment type. hosts (list of string): List of hosts forming the UDA cluster. local_mount_dir (string): Directory on the host where views will be mounted. (This is deprecated now and the value is derived from a gflag in agent.) mount_view (bool): Whether to mount a view during restore. mounts (int): Max number of view mounts to use for the restore. preferred_control_nodes (list of string): Control nodes to connect for control path op\"\"\"\n\n def __init__(self, capabilities=None, concurrency=None, host_type=None, hosts=None, local_mount_dir=None, mount_view=None, mounts=None, preferred_control_nodes=None, restore_args=None, restore_job_arguments_map=None, run_start_time_usecs=None, script_dir=None, source_args=None, source_arguments_map=None, source_type=None, uda_s3_view_backup_properties=None, use_s3_view=None):\n \"\"\"Constructor for the UdaRecoverJobParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UdaRecoverJobParams:\n \"\"\"Implementation of the 'UdaRecoverJobParams' model. TODO: type description here. Attributes: capabilities (UdaSourceCapabilities): Types of backups supported. concurrency (int): Number of parallel streams to use for the restore. host_type (int): The agent host environment type. hosts (list of string): List of hosts forming the UDA cluster. local_mount_dir (string): Directory on the host where views will be mounted. (This is deprecated now and the value is derived from a gflag in agent.) mount_view (bool): Whether to mount a view during restore. mounts (int): Max number of view mounts to use for the restore. preferred_control_nodes (list of string): Control nodes to connect for control path op\"\"\"\n\n def __init__(self, capabilities=None, concurrency=None, host_type=None, hosts=None, local_mount_dir=None, mount_view=None, mounts=None, preferred_control_nodes=None, restore_args=None, restore_job_arguments_map=None, run_start_time_usecs=None, script_dir=None, source_args=None, source_arguments_map=None, source_type=None, uda_s3_view_backup_properties=None, use_s3_view=None):\n \"\"\"Constructor for the UdaRecoverJobParams class\"\"\"\n self.capabilities = capabilities\n self.concurrency = concurrency\n self.host_type = host_type\n self.hosts = hosts\n self.local_mount_dir = local_mount_dir\n self.mount_view = mount_view\n self.mounts = mounts\n self.preferred_control_nodes = preferred_control_nodes\n self.restore_args = restore_args\n self.restore_job_arguments_map = restore_job_arguments_map\n self.run_start_time_usecs = run_start_time_usecs\n self.script_dir = script_dir\n self.source_args = source_args\n self.source_arguments_map = source_arguments_map\n self.source_type = source_type\n self.uda_s3_view_backup_properties = uda_s3_view_backup_properties\n self.use_s3_view = use_s3_view\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n capabilities = cohesity_management_sdk.models.uda_source_capabilities.UdaSourceCapabilities.from_dictionary(dictionary.get('capabilities')) if dictionary.get('capabilities') else None\n concurrency = dictionary.get('concurrency')\n host_type = dictionary.get('hostType')\n hosts = dictionary.get('hosts')\n local_mount_dir = dictionary.get('localMountDir')\n mount_view = dictionary.get('mountView')\n mounts = dictionary.get('mounts')\n preferred_control_nodes = dictionary.get('preferredControlNodes')\n restore_args = dictionary.get('restoreArgs')\n restore_job_arguments_map = None\n if dictionary.get('restoreJobArgumentsMap') != None:\n restore_job_arguments_map = list()\n for structure in dictionary.get('restoreJobArgumentsMap'):\n restore_job_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_restore_job_arguments_map_entry.UdaRecoverJobParams_RestoreJobArgumentsMapEntry.from_dictionary(structure))\n run_start_time_usecs = dictionary.get('runStartTimeUsecs')\n script_dir = dictionary.get('scriptDir')\n source_args = dictionary.get('sourceArgs')\n source_arguments_map = None\n if dictionary.get('sourceArgumentsMap') != None:\n source_arguments_map = list()\n for structure in dictionary.get('sourceArgumentsMap'):\n source_arguments_map.append(cohesity_management_sdk.models.uda_recover_job_params_source_arguments_map_entry.UdaRecoverJobParams_SourceArgumentsMapEntry.from_dictionary(structure))\n source_type = dictionary.get('sourceType')\n uda_s3_view_backup_properties = cohesity_management_sdk.models.uda_s3_view_backup_properties.UdaS3ViewBackupProperties.from_dictionary(dictionary.get('udaS3ViewBackupProperties')) if dictionary.get('udaS3ViewBackupProperties') else None\n use_s3_view = dictionary.get('useS3View')\n return cls(capabilities, concurrency, host_type, hosts, local_mount_dir, mount_view, mounts, preferred_control_nodes, restore_args, restore_job_arguments_map, run_start_time_usecs, script_dir, source_args, source_arguments_map, source_type, uda_s3_view_backup_properties, use_s3_view)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/uda_recover_job_params.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "bca5312e2d829af609e673de5447a2eee714d989", "bodies": ["d = {}\nfor i in nums:\n if i in d:\n d[i] += 1\n else:\n d[i] = 1\nx = d.get(0, 0)\ny = d.get(1, 0)\nz = d.get(2, 0)\nnums[:x] = [0] * x\nnums[x:x + y] = [1] * y\nnums[x + y:] = [2] * z", "start = 0\nend = len(nums) - 1\ni = 0\nwhile i <= end:\n if nums[i] == 0:\n nums[start], nums[i] = (nums[i], nums[start])\n start += 1\n i += 1\n elif nums[i] == 2:\n nums[end], nums[i] = (nums[i], nums[end])\n end -= 1\n else:\n i += 1"], "bodies_text": "<|body_start_0|>\n d = {}\n for i in nums:\n if i in d:\n d[i] += 1\n else:\n d[i] = 1\n x = d.get(0, 0)\n y = d.get(1, 0)\n z = d.get(2, 0)\n nums[:x] = [0] * x\n nums[x:x + y] = [1] * y\n nums[x + y:] = [2] * z\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n end = len(nums) - 1\n i = 0\n while i <= end:\n if nums[i] == 0:\n nums[start], nums[i] = (nums[i], nums[start])\n start += 1\n i += 1\n elif nums[i] == 2:\n nums[end], nums[i] = (nums[i], nums[end])\n end -= 1\n else:\n i += 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def sortColors(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 普通\"\"\"\n <|body_0|>\n\n def sortColors2(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 进阶\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = {}\n for i in nums:\n if i in d:\n d[i] += 1\n else:\n d[i] = 1\n x = d.get(0, 0)\n y = d.get(1, 0)\n z = d.get(2, 0)\n nums[:x] = [0] * x\n nums[x:x + y] = [1] * y\n nums[x + y:] = [2] * z\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n end = len(nums) - 1\n i = 0\n while i <= end:\n if nums[i] == 0:\n nums[start], nums[i] = (nums[i], nums[start])\n start += 1\n i += 1\n elif nums[i] == 2:\n nums[end], nums[i] = (nums[i], nums[end])\n end -= 1\n else:\n i += 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000223", "length_bytes": 1037, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 普通", "name": "sortColors", "signature": "def sortColors(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 进阶", "name": "sortColors2", "signature": "def sortColors2(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030828", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def sortColors(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 普通\n- def sortColors2(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 进阶", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def sortColors(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 普通\n- def sortColors2(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 进阶\n\n<|skeleton|>\nclass Solution:\n\n def sortColors(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 普通\"\"\"\n <|body_0|>\n\n def sortColors2(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 进阶\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = {}\n for i in nums:\n if i in d:\n d[i] += 1\n else:\n d[i] = 1\n x = d.get(0, 0)\n y = d.get(1, 0)\n z = d.get(2, 0)\n nums[:x] = [0] * x\n nums[x:x + y] = [1] * y\n nums[x + y:] = [2] * z\n<|end_body_0|>\n\n<|body_start_1|>\n start = 0\n end = len(nums) - 1\n i = 0\n while i <= end:\n if nums[i] == 0:\n nums[start], nums[i] = (nums[i], nums[start])\n start += 1\n i += 1\n elif nums[i] == 2:\n nums[end], nums[i] = (nums[i], nums[end])\n end -= 1\n else:\n i += 1\n<|end_body_1|>\n", "revision_id": "624975f767f6efa1d7361cc077eaebc344d57210", "skeleton": "<|skeleton|>\nclass Solution:\n\n def sortColors(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 普通\"\"\"\n <|body_0|>\n\n def sortColors2(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 进阶\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def sortColors(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 普通\"\"\"\n d = {}\n for i in nums:\n if i in d:\n d[i] += 1\n else:\n d[i] = 1\n x = d.get(0, 0)\n y = d.get(1, 0)\n z = d.get(2, 0)\n nums[:x] = [0] * x\n nums[x:x + y] = [1] * y\n nums[x + y:] = [2] * z\n\n def sortColors2(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead. 进阶\"\"\"\n start = 0\n end = len(nums) - 1\n i = 0\n while i <= end:\n if nums[i] == 0:\n nums[start], nums[i] = (nums[i], nums[start])\n start += 1\n i += 1\n elif nums[i] == 2:\n nums[end], nums[i] = (nums[i], nums[end])\n end -= 1\n else:\n i += 1\n", "source": "the_stack_v2_python_sparse", "source_path": "75. 分类颜色.py", "source_repo": "dx19910707/LeetCode", "split": "test", "star_events_count": 0} {"blob_id": "0ec9cdf1b298f5e9f5fb1e95d35e7630cc30a3c1", "bodies": ["super().__init__(input_size=input_size, hidden_size=hidden_size, bias=bias)\nassert 0 <= dropout <= 1, 'Dropout rate must be in the range [0, 1]'\nassert 0 <= recurrent_dropout <= 1, 'Dropout rate must be in the range [0, 1]'\nself._dropout = dropout\nself._recurrent_dropout = recurrent_dropout", "if hx is None:\n hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\ndropped_input = nn.functional.dropout(input=input, p=self._dropout, training=self.training)\ndropped_hidden = nn.functional.dropout(input=hx, p=self._recurrent_dropout, training=self.training)\nreturn super().forward(dropped_input, dropped_hidden)"], "bodies_text": "<|body_start_0|>\n super().__init__(input_size=input_size, hidden_size=hidden_size, bias=bias)\n assert 0 <= dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n assert 0 <= recurrent_dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n self._dropout = dropout\n self._recurrent_dropout = recurrent_dropout\n<|end_body_0|>\n\n<|body_start_1|>\n if hx is None:\n hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\n dropped_input = nn.functional.dropout(input=input, p=self._dropout, training=self.training)\n dropped_hidden = nn.functional.dropout(input=hx, p=self._recurrent_dropout, training=self.training)\n return super().forward(dropped_input, dropped_hidden)\n<|end_body_1|>\n", "class_docstring": "A wrapper around torch.nn.GRUCell that adds dropout to inputs and hidden. This wrapper makes the implementation more in-line with that of tf.keras.layers.GRUCell It doesn't accomplish it entirely, because: 1) the base GRU cell is different (the ordering of when the reset gate is applied is different between torch and tensorflow). 2) in the TF implementation a different mask is applied to the input/hidden vectors when passed into different gates.", "class_name": "DropoutGRUCell", "detected_licenses": ["MIT", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DropoutGRUCell:\n \"\"\"A wrapper around torch.nn.GRUCell that adds dropout to inputs and hidden. This wrapper makes the implementation more in-line with that of tf.keras.layers.GRUCell It doesn't accomplish it entirely, because: 1) the base GRU cell is different (the ordering of when the reset gate is applied is different between torch and tensorflow). 2) in the TF implementation a different mask is applied to the input/hidden vectors when passed into different gates.\"\"\"\n\n def __init__(self, input_size, hidden_size, bias=True, dropout: float=0.0, recurrent_dropout: float=0.0):\n \"\"\"Args: input_size: Dimensionality of the input to the GRUCell hidden_size: Dimensionality of the hidden dimension of the GRUCell bias (optional): If False, then the layer does not use bias weights b_ih and b_hh. Defaults to True. dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0. recurrent_dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0.\"\"\"\n <|body_0|>\n\n def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor:\n \"\"\"Apply dropout to input and hidden tensors (`hx`), and pass them to the torch GRUCell.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(input_size=input_size, hidden_size=hidden_size, bias=bias)\n assert 0 <= dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n assert 0 <= recurrent_dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n self._dropout = dropout\n self._recurrent_dropout = recurrent_dropout\n<|end_body_0|>\n\n<|body_start_1|>\n if hx is None:\n hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\n dropped_input = nn.functional.dropout(input=input, p=self._dropout, training=self.training)\n dropped_hidden = nn.functional.dropout(input=hx, p=self._recurrent_dropout, training=self.training)\n return super().forward(dropped_input, dropped_hidden)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000224", "length_bytes": 20187, "license_type": "permissive", "methods": [{"docstring": "Args: input_size: Dimensionality of the input to the GRUCell hidden_size: Dimensionality of the hidden dimension of the GRUCell bias (optional): If False, then the layer does not use bias weights b_ih and b_hh. Defaults to True. dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0. recurrent_dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0.", "name": "__init__", "signature": "def __init__(self, input_size, hidden_size, bias=True, dropout: float=0.0, recurrent_dropout: float=0.0)"}, {"docstring": "Apply dropout to input and hidden tensors (`hx`), and pass them to the torch GRUCell.", "name": "forward", "signature": "def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_008907", "prompt": "Implement the Python class `DropoutGRUCell` described below.\n\nClass description:\nA wrapper around torch.nn.GRUCell that adds dropout to inputs and hidden. This wrapper makes the implementation more in-line with that of tf.keras.layers.GRUCell It doesn't accomplish it entirely, because: 1) the base GRU cell is different (the ordering of when the reset gate is applied is different between torch and tensorflow). 2) in the TF implementation a different mask is applied to the input/hidden vectors when passed into different gates.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, hidden_size, bias=True, dropout: float=0.0, recurrent_dropout: float=0.0): Args: input_size: Dimensionality of the input to the GRUCell hidden_size: Dimensionality of the hidden dimension of the GRUCell bias (optional): If False, then the layer does not use bias weights b_ih and b_hh. Defaults to True. dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0. recurrent_dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0.\n- def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor: Apply dropout to input and hidden tensors (`hx`), and pass them to the torch GRUCell.", "prompted_full_text": "Implement the Python class `DropoutGRUCell` described below.\n\nClass description:\nA wrapper around torch.nn.GRUCell that adds dropout to inputs and hidden. This wrapper makes the implementation more in-line with that of tf.keras.layers.GRUCell It doesn't accomplish it entirely, because: 1) the base GRU cell is different (the ordering of when the reset gate is applied is different between torch and tensorflow). 2) in the TF implementation a different mask is applied to the input/hidden vectors when passed into different gates.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, hidden_size, bias=True, dropout: float=0.0, recurrent_dropout: float=0.0): Args: input_size: Dimensionality of the input to the GRUCell hidden_size: Dimensionality of the hidden dimension of the GRUCell bias (optional): If False, then the layer does not use bias weights b_ih and b_hh. Defaults to True. dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0. recurrent_dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0.\n- def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor: Apply dropout to input and hidden tensors (`hx`), and pass them to the torch GRUCell.\n\n<|skeleton|>\nclass DropoutGRUCell:\n \"\"\"A wrapper around torch.nn.GRUCell that adds dropout to inputs and hidden. This wrapper makes the implementation more in-line with that of tf.keras.layers.GRUCell It doesn't accomplish it entirely, because: 1) the base GRU cell is different (the ordering of when the reset gate is applied is different between torch and tensorflow). 2) in the TF implementation a different mask is applied to the input/hidden vectors when passed into different gates.\"\"\"\n\n def __init__(self, input_size, hidden_size, bias=True, dropout: float=0.0, recurrent_dropout: float=0.0):\n \"\"\"Args: input_size: Dimensionality of the input to the GRUCell hidden_size: Dimensionality of the hidden dimension of the GRUCell bias (optional): If False, then the layer does not use bias weights b_ih and b_hh. Defaults to True. dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0. recurrent_dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0.\"\"\"\n <|body_0|>\n\n def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor:\n \"\"\"Apply dropout to input and hidden tensors (`hx`), and pass them to the torch GRUCell.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(input_size=input_size, hidden_size=hidden_size, bias=bias)\n assert 0 <= dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n assert 0 <= recurrent_dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n self._dropout = dropout\n self._recurrent_dropout = recurrent_dropout\n<|end_body_0|>\n\n<|body_start_1|>\n if hx is None:\n hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\n dropped_input = nn.functional.dropout(input=input, p=self._dropout, training=self.training)\n dropped_hidden = nn.functional.dropout(input=hx, p=self._recurrent_dropout, training=self.training)\n return super().forward(dropped_input, dropped_hidden)\n<|end_body_1|>\n", "revision_id": "8fa75e67c0db8f632b135379740051cd10ff31f2", "skeleton": "<|skeleton|>\nclass DropoutGRUCell:\n \"\"\"A wrapper around torch.nn.GRUCell that adds dropout to inputs and hidden. This wrapper makes the implementation more in-line with that of tf.keras.layers.GRUCell It doesn't accomplish it entirely, because: 1) the base GRU cell is different (the ordering of when the reset gate is applied is different between torch and tensorflow). 2) in the TF implementation a different mask is applied to the input/hidden vectors when passed into different gates.\"\"\"\n\n def __init__(self, input_size, hidden_size, bias=True, dropout: float=0.0, recurrent_dropout: float=0.0):\n \"\"\"Args: input_size: Dimensionality of the input to the GRUCell hidden_size: Dimensionality of the hidden dimension of the GRUCell bias (optional): If False, then the layer does not use bias weights b_ih and b_hh. Defaults to True. dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0. recurrent_dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0.\"\"\"\n <|body_0|>\n\n def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor:\n \"\"\"Apply dropout to input and hidden tensors (`hx`), and pass them to the torch GRUCell.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DropoutGRUCell:\n \"\"\"A wrapper around torch.nn.GRUCell that adds dropout to inputs and hidden. This wrapper makes the implementation more in-line with that of tf.keras.layers.GRUCell It doesn't accomplish it entirely, because: 1) the base GRU cell is different (the ordering of when the reset gate is applied is different between torch and tensorflow). 2) in the TF implementation a different mask is applied to the input/hidden vectors when passed into different gates.\"\"\"\n\n def __init__(self, input_size, hidden_size, bias=True, dropout: float=0.0, recurrent_dropout: float=0.0):\n \"\"\"Args: input_size: Dimensionality of the input to the GRUCell hidden_size: Dimensionality of the hidden dimension of the GRUCell bias (optional): If False, then the layer does not use bias weights b_ih and b_hh. Defaults to True. dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0. recurrent_dropout (optional): Fraction of the units to drop for the linear transformation of the inputs. Defaults to 0.0.\"\"\"\n super().__init__(input_size=input_size, hidden_size=hidden_size, bias=bias)\n assert 0 <= dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n assert 0 <= recurrent_dropout <= 1, 'Dropout rate must be in the range [0, 1]'\n self._dropout = dropout\n self._recurrent_dropout = recurrent_dropout\n\n def forward(self, input: Tensor, hx: Optional[Tensor]=None) -> Tensor:\n \"\"\"Apply dropout to input and hidden tensors (`hx`), and pass them to the torch GRUCell.\"\"\"\n if hx is None:\n hx = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)\n dropped_input = nn.functional.dropout(input=input, p=self._dropout, training=self.training)\n dropped_hidden = nn.functional.dropout(input=hx, p=self._recurrent_dropout, training=self.training)\n return super().forward(dropped_input, dropped_hidden)\n", "source": "the_stack_v2_python_sparse", "source_path": "rlo/src/rlo/model/layers.py", "source_repo": "tomjaguarpaw/knossos-ksc", "split": "test", "star_events_count": 0} {"blob_id": "72e5f490f2e100c789ec8a8881b8e39a28149546", "bodies": ["data_model = self._sdc_definitions.data_model\nmdib = self._mdib_wref()\nif mdib is None:\n raise ApiUsageError('no mdib information')\ncontext_descriptor_container = mdib.descriptions.handle.get_one(descriptor_handle)\nif handle is None:\n cls = data_model.get_state_container_class(context_descriptor_container.STATE_QNAME)\n obj = cls(descriptor_container=context_descriptor_container)\n obj.Handle = descriptor_handle\nelse:\n _obj = mdib.context_states.handle.get_one(handle)\n obj = _obj.mk_copy()\nreturn obj", "data_model = self._sdc_definitions.data_model\ntmp = ', '.join([f'{st.__class__.__name__}(DescriptorHandle={st.DescriptorHandle}, handle={st.Handle})' for st in proposed_context_states])\nself._logger.info('set_context_state {}', tmp)\nrequest = data_model.msg_types.SetContextState()\nrequest.OperationHandleRef = operation_handle\nrequest.ProposedContextState.extend(proposed_context_states)\ninf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\nmessage = self._msg_factory.mk_soap_message(inf, payload=request)\nreturn self._call_operation(message, request_manipulator=request_manipulator)", "data_model = self._sdc_definitions.data_model\nrequest = data_model.msg_types.GetContextStates()\nif handles is not None:\n request.HandleRef.extend(handles)\ninf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\nmessage = self._msg_factory.mk_soap_message(inf, payload=request)\nreceived_message_data = self.post_message(message, request_manipulator=request_manipulator)\ncls = received_message_data.msg_reader.msg_types.GetContextStatesResponse\nreport = cls.from_node(received_message_data.p_msg.msg_node)\nreturn GetRequestResult(received_message_data, report)", "data_model = self._sdc_definitions.data_model\nrequest = data_model.msg_types.GetContextStatesByIdentification()\nif identifications is not None:\n request.Identification.extend(identifications)\nrequest.ContextType = context_type\ninf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\nmessage = self._msg_factory.mk_soap_message(inf, payload=request)\nreceived_message_data = self.post_message(message, request_manipulator=request_manipulator)\ncls = data_model.msg_types.GetContextStatesByIdentificationResponse\nreport = cls.from_node(received_message_data.p_msg.msg_node)\nreturn GetRequestResult(received_message_data, report)", "data_model = self._sdc_definitions.data_model\nrequest = data_model.msg_types.GetContextStatesByFilter()\nrequest.Filter.extend(filters)\ninf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\nmessage = self._msg_factory.mk_soap_message(inf, payload=request)\nreceived_message_data = self.post_message(message, request_manipulator=request_manipulator)\ncls = data_model.msg_types.GetContextStatesByFilterResponse\nreport = cls.from_node(received_message_data.p_msg.msg_node)\nreturn GetRequestResult(received_message_data, report)"], "bodies_text": "<|body_start_0|>\n data_model = self._sdc_definitions.data_model\n mdib = self._mdib_wref()\n if mdib is None:\n raise ApiUsageError('no mdib information')\n context_descriptor_container = mdib.descriptions.handle.get_one(descriptor_handle)\n if handle is None:\n cls = data_model.get_state_container_class(context_descriptor_container.STATE_QNAME)\n obj = cls(descriptor_container=context_descriptor_container)\n obj.Handle = descriptor_handle\n else:\n _obj = mdib.context_states.handle.get_one(handle)\n obj = _obj.mk_copy()\n return obj\n<|end_body_0|>\n\n<|body_start_1|>\n data_model = self._sdc_definitions.data_model\n tmp = ', '.join([f'{st.__class__.__name__}(DescriptorHandle={st.DescriptorHandle}, handle={st.Handle})' for st in proposed_context_states])\n self._logger.info('set_context_state {}', tmp)\n request = data_model.msg_types.SetContextState()\n request.OperationHandleRef = operation_handle\n request.ProposedContextState.extend(proposed_context_states)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n return self._call_operation(message, request_manipulator=request_manipulator)\n<|end_body_1|>\n\n<|body_start_2|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStates()\n if handles is not None:\n request.HandleRef.extend(handles)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = received_message_data.msg_reader.msg_types.GetContextStatesResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_2|>\n\n<|body_start_3|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByIdentification()\n if identifications is not None:\n request.Identification.extend(identifications)\n request.ContextType = context_type\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByIdentificationResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_3|>\n\n<|body_start_4|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByFilter()\n request.Filter.extend(filters)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByFilterResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_4|>\n", "class_docstring": "Client for ContextService.", "class_name": "ContextServiceClient", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ContextServiceClient:\n \"\"\"Client for ContextService.\"\"\"\n\n def mk_proposed_context_object(self, descriptor_handle: str, handle: str | None=None) -> AbstractMultiStateProtocol:\n \"\"\"Create a state that can be used in set_context_state operation. :param descriptor_handle: the descriptor for which a state shall be created or updated :param handle: if None, a new object with default values is created (INSERT operation). Else a copy of an existing state with this handle is returned. :return: a context state instance\"\"\"\n <|body_0|>\n\n def set_context_state(self, operation_handle: str, proposed_context_states: list, request_manipulator: RequestManipulatorProtocol | None=None) -> Future:\n \"\"\"Send a SetContextState request. :param operation_handle: the descriptor for which a state shall be created or updated :param proposed_context_states: list of proposed context states :param request_manipulator: see documentation of RequestManipulatorProtocol :return: a concurrent.futures.Future object\"\"\"\n <|body_1|>\n\n def get_context_states(self, handles: list[str] | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStates request. :param handles: a list of handles :param request_manipulator: see documentation of RequestManipulatorProtocol :return: result of the call\"\"\"\n <|body_2|>\n\n def get_context_state_by_identification(self, identifications: list[InstanceIdentifier], context_type: QName | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByIdentification request. :param identifications: list of identifiers :param context_type: Type to query :param request_manipulator: see documentation of RequestManipulatorProtocol :return:\"\"\"\n <|body_3|>\n\n def get_context_state_by_filter(self, filters: list[str], request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByFilter request. :param filters: List of XPath expressions. :param request_manipulator: see documentation of RequestManipulatorProtocol :return: GetRequestResult\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data_model = self._sdc_definitions.data_model\n mdib = self._mdib_wref()\n if mdib is None:\n raise ApiUsageError('no mdib information')\n context_descriptor_container = mdib.descriptions.handle.get_one(descriptor_handle)\n if handle is None:\n cls = data_model.get_state_container_class(context_descriptor_container.STATE_QNAME)\n obj = cls(descriptor_container=context_descriptor_container)\n obj.Handle = descriptor_handle\n else:\n _obj = mdib.context_states.handle.get_one(handle)\n obj = _obj.mk_copy()\n return obj\n<|end_body_0|>\n\n<|body_start_1|>\n data_model = self._sdc_definitions.data_model\n tmp = ', '.join([f'{st.__class__.__name__}(DescriptorHandle={st.DescriptorHandle}, handle={st.Handle})' for st in proposed_context_states])\n self._logger.info('set_context_state {}', tmp)\n request = data_model.msg_types.SetContextState()\n request.OperationHandleRef = operation_handle\n request.ProposedContextState.extend(proposed_context_states)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n return self._call_operation(message, request_manipulator=request_manipulator)\n<|end_body_1|>\n\n<|body_start_2|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStates()\n if handles is not None:\n request.HandleRef.extend(handles)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = received_message_data.msg_reader.msg_types.GetContextStatesResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_2|>\n\n<|body_start_3|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByIdentification()\n if identifications is not None:\n request.Identification.extend(identifications)\n request.ContextType = context_type\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByIdentificationResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_3|>\n\n<|body_start_4|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByFilter()\n request.Filter.extend(filters)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByFilterResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000225", "length_bytes": 7104, "license_type": "permissive", "methods": [{"docstring": "Create a state that can be used in set_context_state operation. :param descriptor_handle: the descriptor for which a state shall be created or updated :param handle: if None, a new object with default values is created (INSERT operation). Else a copy of an existing state with this handle is returned. :return: a context state instance", "name": "mk_proposed_context_object", "signature": "def mk_proposed_context_object(self, descriptor_handle: str, handle: str | None=None) -> AbstractMultiStateProtocol"}, {"docstring": "Send a SetContextState request. :param operation_handle: the descriptor for which a state shall be created or updated :param proposed_context_states: list of proposed context states :param request_manipulator: see documentation of RequestManipulatorProtocol :return: a concurrent.futures.Future object", "name": "set_context_state", "signature": "def set_context_state(self, operation_handle: str, proposed_context_states: list, request_manipulator: RequestManipulatorProtocol | None=None) -> Future"}, {"docstring": "Send a GetContextStates request. :param handles: a list of handles :param request_manipulator: see documentation of RequestManipulatorProtocol :return: result of the call", "name": "get_context_states", "signature": "def get_context_states(self, handles: list[str] | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult"}, {"docstring": "Send a GetContextStatesByIdentification request. :param identifications: list of identifiers :param context_type: Type to query :param request_manipulator: see documentation of RequestManipulatorProtocol :return:", "name": "get_context_state_by_identification", "signature": "def get_context_state_by_identification(self, identifications: list[InstanceIdentifier], context_type: QName | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult"}, {"docstring": "Send a GetContextStatesByFilter request. :param filters: List of XPath expressions. :param request_manipulator: see documentation of RequestManipulatorProtocol :return: GetRequestResult", "name": "get_context_state_by_filter", "signature": "def get_context_state_by_filter(self, filters: list[str], request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_008981", "prompt": "Implement the Python class `ContextServiceClient` described below.\n\nClass description:\nClient for ContextService.\n\nMethod signatures and docstrings:\n- def mk_proposed_context_object(self, descriptor_handle: str, handle: str | None=None) -> AbstractMultiStateProtocol: Create a state that can be used in set_context_state operation. :param descriptor_handle: the descriptor for which a state shall be created or updated :param handle: if None, a new object with default values is created (INSERT operation). Else a copy of an existing state with this handle is returned. :return: a context state instance\n- def set_context_state(self, operation_handle: str, proposed_context_states: list, request_manipulator: RequestManipulatorProtocol | None=None) -> Future: Send a SetContextState request. :param operation_handle: the descriptor for which a state shall be created or updated :param proposed_context_states: list of proposed context states :param request_manipulator: see documentation of RequestManipulatorProtocol :return: a concurrent.futures.Future object\n- def get_context_states(self, handles: list[str] | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult: Send a GetContextStates request. :param handles: a list of handles :param request_manipulator: see documentation of RequestManipulatorProtocol :return: result of the call\n- def get_context_state_by_identification(self, identifications: list[InstanceIdentifier], context_type: QName | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult: Send a GetContextStatesByIdentification request. :param identifications: list of identifiers :param context_type: Type to query :param request_manipulator: see documentation of RequestManipulatorProtocol :return:\n- def get_context_state_by_filter(self, filters: list[str], request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult: Send a GetContextStatesByFilter request. :param filters: List of XPath expressions. :param request_manipulator: see documentation of RequestManipulatorProtocol :return: GetRequestResult", "prompted_full_text": "Implement the Python class `ContextServiceClient` described below.\n\nClass description:\nClient for ContextService.\n\nMethod signatures and docstrings:\n- def mk_proposed_context_object(self, descriptor_handle: str, handle: str | None=None) -> AbstractMultiStateProtocol: Create a state that can be used in set_context_state operation. :param descriptor_handle: the descriptor for which a state shall be created or updated :param handle: if None, a new object with default values is created (INSERT operation). Else a copy of an existing state with this handle is returned. :return: a context state instance\n- def set_context_state(self, operation_handle: str, proposed_context_states: list, request_manipulator: RequestManipulatorProtocol | None=None) -> Future: Send a SetContextState request. :param operation_handle: the descriptor for which a state shall be created or updated :param proposed_context_states: list of proposed context states :param request_manipulator: see documentation of RequestManipulatorProtocol :return: a concurrent.futures.Future object\n- def get_context_states(self, handles: list[str] | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult: Send a GetContextStates request. :param handles: a list of handles :param request_manipulator: see documentation of RequestManipulatorProtocol :return: result of the call\n- def get_context_state_by_identification(self, identifications: list[InstanceIdentifier], context_type: QName | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult: Send a GetContextStatesByIdentification request. :param identifications: list of identifiers :param context_type: Type to query :param request_manipulator: see documentation of RequestManipulatorProtocol :return:\n- def get_context_state_by_filter(self, filters: list[str], request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult: Send a GetContextStatesByFilter request. :param filters: List of XPath expressions. :param request_manipulator: see documentation of RequestManipulatorProtocol :return: GetRequestResult\n\n<|skeleton|>\nclass ContextServiceClient:\n \"\"\"Client for ContextService.\"\"\"\n\n def mk_proposed_context_object(self, descriptor_handle: str, handle: str | None=None) -> AbstractMultiStateProtocol:\n \"\"\"Create a state that can be used in set_context_state operation. :param descriptor_handle: the descriptor for which a state shall be created or updated :param handle: if None, a new object with default values is created (INSERT operation). Else a copy of an existing state with this handle is returned. :return: a context state instance\"\"\"\n <|body_0|>\n\n def set_context_state(self, operation_handle: str, proposed_context_states: list, request_manipulator: RequestManipulatorProtocol | None=None) -> Future:\n \"\"\"Send a SetContextState request. :param operation_handle: the descriptor for which a state shall be created or updated :param proposed_context_states: list of proposed context states :param request_manipulator: see documentation of RequestManipulatorProtocol :return: a concurrent.futures.Future object\"\"\"\n <|body_1|>\n\n def get_context_states(self, handles: list[str] | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStates request. :param handles: a list of handles :param request_manipulator: see documentation of RequestManipulatorProtocol :return: result of the call\"\"\"\n <|body_2|>\n\n def get_context_state_by_identification(self, identifications: list[InstanceIdentifier], context_type: QName | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByIdentification request. :param identifications: list of identifiers :param context_type: Type to query :param request_manipulator: see documentation of RequestManipulatorProtocol :return:\"\"\"\n <|body_3|>\n\n def get_context_state_by_filter(self, filters: list[str], request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByFilter request. :param filters: List of XPath expressions. :param request_manipulator: see documentation of RequestManipulatorProtocol :return: GetRequestResult\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data_model = self._sdc_definitions.data_model\n mdib = self._mdib_wref()\n if mdib is None:\n raise ApiUsageError('no mdib information')\n context_descriptor_container = mdib.descriptions.handle.get_one(descriptor_handle)\n if handle is None:\n cls = data_model.get_state_container_class(context_descriptor_container.STATE_QNAME)\n obj = cls(descriptor_container=context_descriptor_container)\n obj.Handle = descriptor_handle\n else:\n _obj = mdib.context_states.handle.get_one(handle)\n obj = _obj.mk_copy()\n return obj\n<|end_body_0|>\n\n<|body_start_1|>\n data_model = self._sdc_definitions.data_model\n tmp = ', '.join([f'{st.__class__.__name__}(DescriptorHandle={st.DescriptorHandle}, handle={st.Handle})' for st in proposed_context_states])\n self._logger.info('set_context_state {}', tmp)\n request = data_model.msg_types.SetContextState()\n request.OperationHandleRef = operation_handle\n request.ProposedContextState.extend(proposed_context_states)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n return self._call_operation(message, request_manipulator=request_manipulator)\n<|end_body_1|>\n\n<|body_start_2|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStates()\n if handles is not None:\n request.HandleRef.extend(handles)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = received_message_data.msg_reader.msg_types.GetContextStatesResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_2|>\n\n<|body_start_3|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByIdentification()\n if identifications is not None:\n request.Identification.extend(identifications)\n request.ContextType = context_type\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByIdentificationResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_3|>\n\n<|body_start_4|>\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByFilter()\n request.Filter.extend(filters)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByFilterResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n<|end_body_4|>\n", "revision_id": "dab57b38ed9a9e70e6bc6a9cf44140b96fd95851", "skeleton": "<|skeleton|>\nclass ContextServiceClient:\n \"\"\"Client for ContextService.\"\"\"\n\n def mk_proposed_context_object(self, descriptor_handle: str, handle: str | None=None) -> AbstractMultiStateProtocol:\n \"\"\"Create a state that can be used in set_context_state operation. :param descriptor_handle: the descriptor for which a state shall be created or updated :param handle: if None, a new object with default values is created (INSERT operation). Else a copy of an existing state with this handle is returned. :return: a context state instance\"\"\"\n <|body_0|>\n\n def set_context_state(self, operation_handle: str, proposed_context_states: list, request_manipulator: RequestManipulatorProtocol | None=None) -> Future:\n \"\"\"Send a SetContextState request. :param operation_handle: the descriptor for which a state shall be created or updated :param proposed_context_states: list of proposed context states :param request_manipulator: see documentation of RequestManipulatorProtocol :return: a concurrent.futures.Future object\"\"\"\n <|body_1|>\n\n def get_context_states(self, handles: list[str] | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStates request. :param handles: a list of handles :param request_manipulator: see documentation of RequestManipulatorProtocol :return: result of the call\"\"\"\n <|body_2|>\n\n def get_context_state_by_identification(self, identifications: list[InstanceIdentifier], context_type: QName | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByIdentification request. :param identifications: list of identifiers :param context_type: Type to query :param request_manipulator: see documentation of RequestManipulatorProtocol :return:\"\"\"\n <|body_3|>\n\n def get_context_state_by_filter(self, filters: list[str], request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByFilter request. :param filters: List of XPath expressions. :param request_manipulator: see documentation of RequestManipulatorProtocol :return: GetRequestResult\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ContextServiceClient:\n \"\"\"Client for ContextService.\"\"\"\n\n def mk_proposed_context_object(self, descriptor_handle: str, handle: str | None=None) -> AbstractMultiStateProtocol:\n \"\"\"Create a state that can be used in set_context_state operation. :param descriptor_handle: the descriptor for which a state shall be created or updated :param handle: if None, a new object with default values is created (INSERT operation). Else a copy of an existing state with this handle is returned. :return: a context state instance\"\"\"\n data_model = self._sdc_definitions.data_model\n mdib = self._mdib_wref()\n if mdib is None:\n raise ApiUsageError('no mdib information')\n context_descriptor_container = mdib.descriptions.handle.get_one(descriptor_handle)\n if handle is None:\n cls = data_model.get_state_container_class(context_descriptor_container.STATE_QNAME)\n obj = cls(descriptor_container=context_descriptor_container)\n obj.Handle = descriptor_handle\n else:\n _obj = mdib.context_states.handle.get_one(handle)\n obj = _obj.mk_copy()\n return obj\n\n def set_context_state(self, operation_handle: str, proposed_context_states: list, request_manipulator: RequestManipulatorProtocol | None=None) -> Future:\n \"\"\"Send a SetContextState request. :param operation_handle: the descriptor for which a state shall be created or updated :param proposed_context_states: list of proposed context states :param request_manipulator: see documentation of RequestManipulatorProtocol :return: a concurrent.futures.Future object\"\"\"\n data_model = self._sdc_definitions.data_model\n tmp = ', '.join([f'{st.__class__.__name__}(DescriptorHandle={st.DescriptorHandle}, handle={st.Handle})' for st in proposed_context_states])\n self._logger.info('set_context_state {}', tmp)\n request = data_model.msg_types.SetContextState()\n request.OperationHandleRef = operation_handle\n request.ProposedContextState.extend(proposed_context_states)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n return self._call_operation(message, request_manipulator=request_manipulator)\n\n def get_context_states(self, handles: list[str] | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStates request. :param handles: a list of handles :param request_manipulator: see documentation of RequestManipulatorProtocol :return: result of the call\"\"\"\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStates()\n if handles is not None:\n request.HandleRef.extend(handles)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = received_message_data.msg_reader.msg_types.GetContextStatesResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n\n def get_context_state_by_identification(self, identifications: list[InstanceIdentifier], context_type: QName | None=None, request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByIdentification request. :param identifications: list of identifiers :param context_type: Type to query :param request_manipulator: see documentation of RequestManipulatorProtocol :return:\"\"\"\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByIdentification()\n if identifications is not None:\n request.Identification.extend(identifications)\n request.ContextType = context_type\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByIdentificationResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n\n def get_context_state_by_filter(self, filters: list[str], request_manipulator: RequestManipulatorProtocol | None=None) -> GetRequestResult:\n \"\"\"Send a GetContextStatesByFilter request. :param filters: List of XPath expressions. :param request_manipulator: see documentation of RequestManipulatorProtocol :return: GetRequestResult\"\"\"\n data_model = self._sdc_definitions.data_model\n request = data_model.msg_types.GetContextStatesByFilter()\n request.Filter.extend(filters)\n inf = HeaderInformationBlock(action=request.action, addr_to=self.endpoint_reference.Address)\n message = self._msg_factory.mk_soap_message(inf, payload=request)\n received_message_data = self.post_message(message, request_manipulator=request_manipulator)\n cls = data_model.msg_types.GetContextStatesByFilterResponse\n report = cls.from_node(received_message_data.p_msg.msg_node)\n return GetRequestResult(received_message_data, report)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/sdc11073/consumer/serviceclients/contextservice.py", "source_repo": "deichmab-draeger/sdc11073", "split": "test", "star_events_count": 0} {"blob_id": "34c3b2e251a594a8081fc61e90360aff9c14c6dd", "bodies": ["reviewer = data.get('reviewer')\nif reviewer:\n try:\n try:\n data['reviewer'] = User.objects.get(id=reviewer)\n except ValueError:\n data['reviewer'] = User.objects.get(username=reviewer)\n except User.DoesNotExist:\n raise exceptions.ValidationError(dict(reviewer='Reviewer is not a valid username or user id.'))\nreturn data", "reviewer = data.get('reviewer')\nif reviewer and (not instance.reviewer):\n instance.reviewer = reviewer\n instance.save()\nstatus = data.get('status')\nif status:\n try:\n instance.update(status=status, response_message=data.get('response_message'), cancel_message=data.get('cancel_message'), user=self.context['request'].user, filters=dict(filter_a=data.get('filter_a')))\n except ValueError as exc:\n raise exceptions.ValidationError(dict(status=str(exc)))\nreturn instance"], "bodies_text": "<|body_start_0|>\n reviewer = data.get('reviewer')\n if reviewer:\n try:\n try:\n data['reviewer'] = User.objects.get(id=reviewer)\n except ValueError:\n data['reviewer'] = User.objects.get(username=reviewer)\n except User.DoesNotExist:\n raise exceptions.ValidationError(dict(reviewer='Reviewer is not a valid username or user id.'))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n reviewer = data.get('reviewer')\n if reviewer and (not instance.reviewer):\n instance.reviewer = reviewer\n instance.save()\n status = data.get('status')\n if status:\n try:\n instance.update(status=status, response_message=data.get('response_message'), cancel_message=data.get('cancel_message'), user=self.context['request'].user, filters=dict(filter_a=data.get('filter_a')))\n except ValueError as exc:\n raise exceptions.ValidationError(dict(status=str(exc)))\n return instance\n<|end_body_1|>\n", "class_docstring": "The request data when updating a review.", "class_name": "ReviewUpdateSerializer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReviewUpdateSerializer:\n \"\"\"The request data when updating a review.\"\"\"\n\n def validate(self, data):\n \"\"\"Override of super class's validate method to ensure assigned reviewer is a user.\"\"\"\n <|body_0|>\n\n def update(self, instance, data):\n \"\"\"Override to call update method on the review.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n reviewer = data.get('reviewer')\n if reviewer:\n try:\n try:\n data['reviewer'] = User.objects.get(id=reviewer)\n except ValueError:\n data['reviewer'] = User.objects.get(username=reviewer)\n except User.DoesNotExist:\n raise exceptions.ValidationError(dict(reviewer='Reviewer is not a valid username or user id.'))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n reviewer = data.get('reviewer')\n if reviewer and (not instance.reviewer):\n instance.reviewer = reviewer\n instance.save()\n status = data.get('status')\n if status:\n try:\n instance.update(status=status, response_message=data.get('response_message'), cancel_message=data.get('cancel_message'), user=self.context['request'].user, filters=dict(filter_a=data.get('filter_a')))\n except ValueError as exc:\n raise exceptions.ValidationError(dict(status=str(exc)))\n return instance\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000226", "length_bytes": 40006, "license_type": "permissive", "methods": [{"docstring": "Override of super class's validate method to ensure assigned reviewer is a user.", "name": "validate", "signature": "def validate(self, data)"}, {"docstring": "Override to call update method on the review.", "name": "update", "signature": "def update(self, instance, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024356", "prompt": "Implement the Python class `ReviewUpdateSerializer` described below.\n\nClass description:\nThe request data when updating a review.\n\nMethod signatures and docstrings:\n- def validate(self, data): Override of super class's validate method to ensure assigned reviewer is a user.\n- def update(self, instance, data): Override to call update method on the review.", "prompted_full_text": "Implement the Python class `ReviewUpdateSerializer` described below.\n\nClass description:\nThe request data when updating a review.\n\nMethod signatures and docstrings:\n- def validate(self, data): Override of super class's validate method to ensure assigned reviewer is a user.\n- def update(self, instance, data): Override to call update method on the review.\n\n<|skeleton|>\nclass ReviewUpdateSerializer:\n \"\"\"The request data when updating a review.\"\"\"\n\n def validate(self, data):\n \"\"\"Override of super class's validate method to ensure assigned reviewer is a user.\"\"\"\n <|body_0|>\n\n def update(self, instance, data):\n \"\"\"Override to call update method on the review.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n reviewer = data.get('reviewer')\n if reviewer:\n try:\n try:\n data['reviewer'] = User.objects.get(id=reviewer)\n except ValueError:\n data['reviewer'] = User.objects.get(username=reviewer)\n except User.DoesNotExist:\n raise exceptions.ValidationError(dict(reviewer='Reviewer is not a valid username or user id.'))\n return data\n<|end_body_0|>\n\n<|body_start_1|>\n reviewer = data.get('reviewer')\n if reviewer and (not instance.reviewer):\n instance.reviewer = reviewer\n instance.save()\n status = data.get('status')\n if status:\n try:\n instance.update(status=status, response_message=data.get('response_message'), cancel_message=data.get('cancel_message'), user=self.context['request'].user, filters=dict(filter_a=data.get('filter_a')))\n except ValueError as exc:\n raise exceptions.ValidationError(dict(status=str(exc)))\n return instance\n<|end_body_1|>\n", "revision_id": "b0edf060f4cc5494eef81fce62a563bd5b4e8e31", "skeleton": "<|skeleton|>\nclass ReviewUpdateSerializer:\n \"\"\"The request data when updating a review.\"\"\"\n\n def validate(self, data):\n \"\"\"Override of super class's validate method to ensure assigned reviewer is a user.\"\"\"\n <|body_0|>\n\n def update(self, instance, data):\n \"\"\"Override to call update method on the review.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ReviewUpdateSerializer:\n \"\"\"The request data when updating a review.\"\"\"\n\n def validate(self, data):\n \"\"\"Override of super class's validate method to ensure assigned reviewer is a user.\"\"\"\n reviewer = data.get('reviewer')\n if reviewer:\n try:\n try:\n data['reviewer'] = User.objects.get(id=reviewer)\n except ValueError:\n data['reviewer'] = User.objects.get(username=reviewer)\n except User.DoesNotExist:\n raise exceptions.ValidationError(dict(reviewer='Reviewer is not a valid username or user id.'))\n return data\n\n def update(self, instance, data):\n \"\"\"Override to call update method on the review.\"\"\"\n reviewer = data.get('reviewer')\n if reviewer and (not instance.reviewer):\n instance.reviewer = reviewer\n instance.save()\n status = data.get('status')\n if status:\n try:\n instance.update(status=status, response_message=data.get('response_message'), cancel_message=data.get('cancel_message'), user=self.context['request'].user, filters=dict(filter_a=data.get('filter_a')))\n except ValueError as exc:\n raise exceptions.ValidationError(dict(status=str(exc)))\n return instance\n", "source": "the_stack_v2_python_sparse", "source_path": "manager/projects/api/serializers.py", "source_repo": "stencila/hub", "split": "test", "star_events_count": 31} {"blob_id": "5a96ba46fd930eec4b41b6c7d926948309d8713b", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\nreturn TermsAndConditions()", "from .entity import Entity\nfrom .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\nfrom .terms_and_conditions_assignment import TermsAndConditionsAssignment\nfrom .entity import Entity\nfrom .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\nfrom .terms_and_conditions_assignment import TermsAndConditionsAssignment\nfields: Dict[str, Callable[[Any], None]] = {'acceptanceStatement': lambda n: setattr(self, 'acceptance_statement', n.get_str_value()), 'acceptanceStatuses': lambda n: setattr(self, 'acceptance_statuses', n.get_collection_of_object_values(TermsAndConditionsAcceptanceStatus)), 'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(TermsAndConditionsAssignment)), 'bodyText': lambda n: setattr(self, 'body_text', n.get_str_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_str_value('acceptanceStatement', self.acceptance_statement)\nwriter.write_collection_of_object_values('acceptanceStatuses', self.acceptance_statuses)\nwriter.write_collection_of_object_values('assignments', self.assignments)\nwriter.write_str_value('bodyText', self.body_text)\nwriter.write_datetime_value('createdDateTime', self.created_date_time)\nwriter.write_str_value('description', self.description)\nwriter.write_str_value('displayName', self.display_name)\nwriter.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\nwriter.write_str_value('title', self.title)\nwriter.write_int_value('version', self.version)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TermsAndConditions()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n fields: Dict[str, Callable[[Any], None]] = {'acceptanceStatement': lambda n: setattr(self, 'acceptance_statement', n.get_str_value()), 'acceptanceStatuses': lambda n: setattr(self, 'acceptance_statuses', n.get_collection_of_object_values(TermsAndConditionsAcceptanceStatus)), 'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(TermsAndConditionsAssignment)), 'bodyText': lambda n: setattr(self, 'body_text', n.get_str_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('acceptanceStatement', self.acceptance_statement)\n writer.write_collection_of_object_values('acceptanceStatuses', self.acceptance_statuses)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_str_value('bodyText', self.body_text)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('title', self.title)\n writer.write_int_value('version', self.version)\n<|end_body_2|>\n", "class_docstring": "A termsAndConditions entity represents the metadata and contents of a given Terms and Conditions (T&C) policy. T&C policies’ contents are presented to users upon their first attempt to enroll into Intune and subsequently upon edits where an administrator has required re-acceptance. They enable administrators to communicate the provisions to which a user must agree in order to have devices enrolled into Intune.", "class_name": "TermsAndConditions", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TermsAndConditions:\n \"\"\"A termsAndConditions entity represents the metadata and contents of a given Terms and Conditions (T&C) policy. T&C policies’ contents are presented to users upon their first attempt to enroll into Intune and subsequently upon edits where an administrator has required re-acceptance. They enable administrators to communicate the provisions to which a user must agree in order to have devices enrolled into Intune.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TermsAndConditions:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TermsAndConditions\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TermsAndConditions()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n fields: Dict[str, Callable[[Any], None]] = {'acceptanceStatement': lambda n: setattr(self, 'acceptance_statement', n.get_str_value()), 'acceptanceStatuses': lambda n: setattr(self, 'acceptance_statuses', n.get_collection_of_object_values(TermsAndConditionsAcceptanceStatus)), 'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(TermsAndConditionsAssignment)), 'bodyText': lambda n: setattr(self, 'body_text', n.get_str_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('acceptanceStatement', self.acceptance_statement)\n writer.write_collection_of_object_values('acceptanceStatuses', self.acceptance_statuses)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_str_value('bodyText', self.body_text)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('title', self.title)\n writer.write_int_value('version', self.version)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000227", "length_bytes": 5989, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TermsAndConditions", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TermsAndConditions"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_010500", "prompt": "Implement the Python class `TermsAndConditions` described below.\n\nClass description:\nA termsAndConditions entity represents the metadata and contents of a given Terms and Conditions (T&C) policy. T&C policies’ contents are presented to users upon their first attempt to enroll into Intune and subsequently upon edits where an administrator has required re-acceptance. They enable administrators to communicate the provisions to which a user must agree in order to have devices enrolled into Intune.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TermsAndConditions: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TermsAndConditions\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `TermsAndConditions` described below.\n\nClass description:\nA termsAndConditions entity represents the metadata and contents of a given Terms and Conditions (T&C) policy. T&C policies’ contents are presented to users upon their first attempt to enroll into Intune and subsequently upon edits where an administrator has required re-acceptance. They enable administrators to communicate the provisions to which a user must agree in order to have devices enrolled into Intune.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TermsAndConditions: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TermsAndConditions\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass TermsAndConditions:\n \"\"\"A termsAndConditions entity represents the metadata and contents of a given Terms and Conditions (T&C) policy. T&C policies’ contents are presented to users upon their first attempt to enroll into Intune and subsequently upon edits where an administrator has required re-acceptance. They enable administrators to communicate the provisions to which a user must agree in order to have devices enrolled into Intune.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TermsAndConditions:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TermsAndConditions\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TermsAndConditions()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n fields: Dict[str, Callable[[Any], None]] = {'acceptanceStatement': lambda n: setattr(self, 'acceptance_statement', n.get_str_value()), 'acceptanceStatuses': lambda n: setattr(self, 'acceptance_statuses', n.get_collection_of_object_values(TermsAndConditionsAcceptanceStatus)), 'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(TermsAndConditionsAssignment)), 'bodyText': lambda n: setattr(self, 'body_text', n.get_str_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('acceptanceStatement', self.acceptance_statement)\n writer.write_collection_of_object_values('acceptanceStatuses', self.acceptance_statuses)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_str_value('bodyText', self.body_text)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('title', self.title)\n writer.write_int_value('version', self.version)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass TermsAndConditions:\n \"\"\"A termsAndConditions entity represents the metadata and contents of a given Terms and Conditions (T&C) policy. T&C policies’ contents are presented to users upon their first attempt to enroll into Intune and subsequently upon edits where an administrator has required re-acceptance. They enable administrators to communicate the provisions to which a user must agree in order to have devices enrolled into Intune.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TermsAndConditions:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TermsAndConditions\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TermsAndConditions:\n \"\"\"A termsAndConditions entity represents the metadata and contents of a given Terms and Conditions (T&C) policy. T&C policies’ contents are presented to users upon their first attempt to enroll into Intune and subsequently upon edits where an administrator has required re-acceptance. They enable administrators to communicate the provisions to which a user must agree in order to have devices enrolled into Intune.\"\"\"\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> TermsAndConditions:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: TermsAndConditions\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n return TermsAndConditions()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n from .entity import Entity\n from .terms_and_conditions_acceptance_status import TermsAndConditionsAcceptanceStatus\n from .terms_and_conditions_assignment import TermsAndConditionsAssignment\n fields: Dict[str, Callable[[Any], None]] = {'acceptanceStatement': lambda n: setattr(self, 'acceptance_statement', n.get_str_value()), 'acceptanceStatuses': lambda n: setattr(self, 'acceptance_statuses', n.get_collection_of_object_values(TermsAndConditionsAcceptanceStatus)), 'assignments': lambda n: setattr(self, 'assignments', n.get_collection_of_object_values(TermsAndConditionsAssignment)), 'bodyText': lambda n: setattr(self, 'body_text', n.get_str_value()), 'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'description': lambda n: setattr(self, 'description', n.get_str_value()), 'displayName': lambda n: setattr(self, 'display_name', n.get_str_value()), 'lastModifiedDateTime': lambda n: setattr(self, 'last_modified_date_time', n.get_datetime_value()), 'title': lambda n: setattr(self, 'title', n.get_str_value()), 'version': lambda n: setattr(self, 'version', n.get_int_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_str_value('acceptanceStatement', self.acceptance_statement)\n writer.write_collection_of_object_values('acceptanceStatuses', self.acceptance_statuses)\n writer.write_collection_of_object_values('assignments', self.assignments)\n writer.write_str_value('bodyText', self.body_text)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_str_value('description', self.description)\n writer.write_str_value('displayName', self.display_name)\n writer.write_datetime_value('lastModifiedDateTime', self.last_modified_date_time)\n writer.write_str_value('title', self.title)\n writer.write_int_value('version', self.version)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/terms_and_conditions.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "7e6fc7dfe086ca7f4107d8fe018cacf4df9ca23a", "bodies": ["email, password = self._decode_baseauth(trans.environ.get('HTTP_AUTHORIZATION'))\nuser = trans.sa_session.query(trans.app.model.User).filter(trans.app.model.User.table.c.email == email).all()\nif len(user) is not 1:\n raise ObjectNotFound\nelse:\n user = user[0]\n is_valid_user = user.check_password(password)\nif is_valid_user:\n user_id = user.id\n api_key_row = trans.sa_session.query(trans.app.model.APIKeys).filter(trans.app.model.APIKeys.table.c.user_id == user_id).first()\nelse:\n trans.response.status = 500\n return 'invalid password'\nreturn dict(api_key=api_key_row.key)", "split = encoded_str.strip().split(' ')\nif len(split) == 1:\n try:\n email, password = b64decode(split[0]).split(':')\n except:\n raise HTTPBadRequest\nelif len(split) == 2:\n if split[0].strip().lower() == 'basic':\n try:\n email, password = b64decode(split[1]).split(':')\n except:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\nelse:\n raise HTTPBadRequest\nreturn (unquote(email), unquote(password))"], "bodies_text": "<|body_start_0|>\n email, password = self._decode_baseauth(trans.environ.get('HTTP_AUTHORIZATION'))\n user = trans.sa_session.query(trans.app.model.User).filter(trans.app.model.User.table.c.email == email).all()\n if len(user) is not 1:\n raise ObjectNotFound\n else:\n user = user[0]\n is_valid_user = user.check_password(password)\n if is_valid_user:\n user_id = user.id\n api_key_row = trans.sa_session.query(trans.app.model.APIKeys).filter(trans.app.model.APIKeys.table.c.user_id == user_id).first()\n else:\n trans.response.status = 500\n return 'invalid password'\n return dict(api_key=api_key_row.key)\n<|end_body_0|>\n\n<|body_start_1|>\n split = encoded_str.strip().split(' ')\n if len(split) == 1:\n try:\n email, password = b64decode(split[0]).split(':')\n except:\n raise HTTPBadRequest\n elif len(split) == 2:\n if split[0].strip().lower() == 'basic':\n try:\n email, password = b64decode(split[1]).split(':')\n except:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n return (unquote(email), unquote(password))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AuthenticationController", "detected_licenses": ["CC-BY-2.5", "AFL-2.1", "AFL-3.0", "CC-BY-3.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuthenticationController:\n\n def get_api_key(self, trans, **kwd):\n \"\"\"def get_api_key( self, trans, **kwd ) * GET /api/authenticate/baseauth returns an API key for authenticated user based on BaseAuth headers :returns: api_key in json format :rtype: dict :raises: ObjectNotFound, HTTPBadRequest\"\"\"\n <|body_0|>\n\n def _decode_baseauth(self, encoded_str):\n \"\"\"Decode an encrypted HTTP basic authentication string. Returns a tuple of the form (email, password), and raises a HTTPBadRequest exception if nothing could be decoded. :param encoded_str: BaseAuth string encoded base64 :type encoded_str: string :returns: email of the user :rtype: string :returns: password of the user :rtype: string :raises: HTTPBadRequest\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n email, password = self._decode_baseauth(trans.environ.get('HTTP_AUTHORIZATION'))\n user = trans.sa_session.query(trans.app.model.User).filter(trans.app.model.User.table.c.email == email).all()\n if len(user) is not 1:\n raise ObjectNotFound\n else:\n user = user[0]\n is_valid_user = user.check_password(password)\n if is_valid_user:\n user_id = user.id\n api_key_row = trans.sa_session.query(trans.app.model.APIKeys).filter(trans.app.model.APIKeys.table.c.user_id == user_id).first()\n else:\n trans.response.status = 500\n return 'invalid password'\n return dict(api_key=api_key_row.key)\n<|end_body_0|>\n\n<|body_start_1|>\n split = encoded_str.strip().split(' ')\n if len(split) == 1:\n try:\n email, password = b64decode(split[0]).split(':')\n except:\n raise HTTPBadRequest\n elif len(split) == 2:\n if split[0].strip().lower() == 'basic':\n try:\n email, password = b64decode(split[1]).split(':')\n except:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n return (unquote(email), unquote(password))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000228", "length_bytes": 3135, "license_type": "permissive", "methods": [{"docstring": "def get_api_key( self, trans, **kwd ) * GET /api/authenticate/baseauth returns an API key for authenticated user based on BaseAuth headers :returns: api_key in json format :rtype: dict :raises: ObjectNotFound, HTTPBadRequest", "name": "get_api_key", "signature": "def get_api_key(self, trans, **kwd)"}, {"docstring": "Decode an encrypted HTTP basic authentication string. Returns a tuple of the form (email, password), and raises a HTTPBadRequest exception if nothing could be decoded. :param encoded_str: BaseAuth string encoded base64 :type encoded_str: string :returns: email of the user :rtype: string :returns: password of the user :rtype: string :raises: HTTPBadRequest", "name": "_decode_baseauth", "signature": "def _decode_baseauth(self, encoded_str)"}], "n_methods": 2, "prompt": "Implement the Python class `AuthenticationController` described below.\n\nClass description:\nImplement the AuthenticationController class.\n\nMethod signatures and docstrings:\n- def get_api_key(self, trans, **kwd): def get_api_key( self, trans, **kwd ) * GET /api/authenticate/baseauth returns an API key for authenticated user based on BaseAuth headers :returns: api_key in json format :rtype: dict :raises: ObjectNotFound, HTTPBadRequest\n- def _decode_baseauth(self, encoded_str): Decode an encrypted HTTP basic authentication string. Returns a tuple of the form (email, password), and raises a HTTPBadRequest exception if nothing could be decoded. :param encoded_str: BaseAuth string encoded base64 :type encoded_str: string :returns: email of the user :rtype: string :returns: password of the user :rtype: string :raises: HTTPBadRequest", "prompted_full_text": "Implement the Python class `AuthenticationController` described below.\n\nClass description:\nImplement the AuthenticationController class.\n\nMethod signatures and docstrings:\n- def get_api_key(self, trans, **kwd): def get_api_key( self, trans, **kwd ) * GET /api/authenticate/baseauth returns an API key for authenticated user based on BaseAuth headers :returns: api_key in json format :rtype: dict :raises: ObjectNotFound, HTTPBadRequest\n- def _decode_baseauth(self, encoded_str): Decode an encrypted HTTP basic authentication string. Returns a tuple of the form (email, password), and raises a HTTPBadRequest exception if nothing could be decoded. :param encoded_str: BaseAuth string encoded base64 :type encoded_str: string :returns: email of the user :rtype: string :returns: password of the user :rtype: string :raises: HTTPBadRequest\n\n<|skeleton|>\nclass AuthenticationController:\n\n def get_api_key(self, trans, **kwd):\n \"\"\"def get_api_key( self, trans, **kwd ) * GET /api/authenticate/baseauth returns an API key for authenticated user based on BaseAuth headers :returns: api_key in json format :rtype: dict :raises: ObjectNotFound, HTTPBadRequest\"\"\"\n <|body_0|>\n\n def _decode_baseauth(self, encoded_str):\n \"\"\"Decode an encrypted HTTP basic authentication string. Returns a tuple of the form (email, password), and raises a HTTPBadRequest exception if nothing could be decoded. :param encoded_str: BaseAuth string encoded base64 :type encoded_str: string :returns: email of the user :rtype: string :returns: password of the user :rtype: string :raises: HTTPBadRequest\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n email, password = self._decode_baseauth(trans.environ.get('HTTP_AUTHORIZATION'))\n user = trans.sa_session.query(trans.app.model.User).filter(trans.app.model.User.table.c.email == email).all()\n if len(user) is not 1:\n raise ObjectNotFound\n else:\n user = user[0]\n is_valid_user = user.check_password(password)\n if is_valid_user:\n user_id = user.id\n api_key_row = trans.sa_session.query(trans.app.model.APIKeys).filter(trans.app.model.APIKeys.table.c.user_id == user_id).first()\n else:\n trans.response.status = 500\n return 'invalid password'\n return dict(api_key=api_key_row.key)\n<|end_body_0|>\n\n<|body_start_1|>\n split = encoded_str.strip().split(' ')\n if len(split) == 1:\n try:\n email, password = b64decode(split[0]).split(':')\n except:\n raise HTTPBadRequest\n elif len(split) == 2:\n if split[0].strip().lower() == 'basic':\n try:\n email, password = b64decode(split[1]).split(':')\n except:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n return (unquote(email), unquote(password))\n<|end_body_1|>\n", "revision_id": "acc0267b86ad6a9e5e1619d494c20407d4710e90", "skeleton": "<|skeleton|>\nclass AuthenticationController:\n\n def get_api_key(self, trans, **kwd):\n \"\"\"def get_api_key( self, trans, **kwd ) * GET /api/authenticate/baseauth returns an API key for authenticated user based on BaseAuth headers :returns: api_key in json format :rtype: dict :raises: ObjectNotFound, HTTPBadRequest\"\"\"\n <|body_0|>\n\n def _decode_baseauth(self, encoded_str):\n \"\"\"Decode an encrypted HTTP basic authentication string. Returns a tuple of the form (email, password), and raises a HTTPBadRequest exception if nothing could be decoded. :param encoded_str: BaseAuth string encoded base64 :type encoded_str: string :returns: email of the user :rtype: string :returns: password of the user :rtype: string :raises: HTTPBadRequest\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AuthenticationController:\n def get_api_key(self, trans, **kwd):\n \"\"\"def get_api_key( self, trans, **kwd ) * GET /api/authenticate/baseauth returns an API key for authenticated user based on BaseAuth headers :returns: api_key in json format :rtype: dict :raises: ObjectNotFound, HTTPBadRequest\"\"\"\n email, password = self._decode_baseauth(trans.environ.get('HTTP_AUTHORIZATION'))\n user = trans.sa_session.query(trans.app.model.User).filter(trans.app.model.User.table.c.email == email).all()\n if len(user) is not 1:\n raise ObjectNotFound\n else:\n user = user[0]\n is_valid_user = user.check_password(password)\n if is_valid_user:\n user_id = user.id\n api_key_row = trans.sa_session.query(trans.app.model.APIKeys).filter(trans.app.model.APIKeys.table.c.user_id == user_id).first()\n else:\n trans.response.status = 500\n return 'invalid password'\n return dict(api_key=api_key_row.key)\n\n def _decode_baseauth(self, encoded_str):\n \"\"\"Decode an encrypted HTTP basic authentication string. Returns a tuple of the form (email, password), and raises a HTTPBadRequest exception if nothing could be decoded. :param encoded_str: BaseAuth string encoded base64 :type encoded_str: string :returns: email of the user :rtype: string :returns: password of the user :rtype: string :raises: HTTPBadRequest\"\"\"\n split = encoded_str.strip().split(' ')\n if len(split) == 1:\n try:\n email, password = b64decode(split[0]).split(':')\n except:\n raise HTTPBadRequest\n elif len(split) == 2:\n if split[0].strip().lower() == 'basic':\n try:\n email, password = b64decode(split[1]).split(':')\n except:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n else:\n raise HTTPBadRequest\n return (unquote(email), unquote(password))\n", "source": "the_stack_v2_python_sparse", "source_path": "galaxy-dist/lib/galaxy/webapps/galaxy/api/authenticate.py", "source_repo": "bopopescu/Learn2Mine-Main", "split": "test", "star_events_count": 0} {"blob_id": "e7dd54e52d83e10f666f54125979c7491c6e8052", "bodies": ["self._parent = parent\nself._fixed = {}\naxes: List[GridAxis] = []\nfor ax in parent.axes():\n if ax.name in fixed.keys():\n self._fixed[ax.name] = fixed[ax.name]\n else:\n axes.append(ax)\nGrid.__init__(self, axes, *args, **kwargs)\nself.log.info('Initializing sub interpolator...')\nfor ax, val in self._fixed.items():\n self.log.info('Fixed axis %s to value %f.', ax, val)", "given_params = list(params)\nfull_params = []\nfor ax in self._parent.axes():\n if ax.name in self._fixed.keys():\n full_params.append(self._fixed[ax.name])\n else:\n full_params.append(given_params.pop(0))\nreturn self._parent(tuple(full_params))"], "bodies_text": "<|body_start_0|>\n self._parent = parent\n self._fixed = {}\n axes: List[GridAxis] = []\n for ax in parent.axes():\n if ax.name in fixed.keys():\n self._fixed[ax.name] = fixed[ax.name]\n else:\n axes.append(ax)\n Grid.__init__(self, axes, *args, **kwargs)\n self.log.info('Initializing sub interpolator...')\n for ax, val in self._fixed.items():\n self.log.info('Fixed axis %s to value %f.', ax, val)\n<|end_body_0|>\n\n<|body_start_1|>\n given_params = list(params)\n full_params = []\n for ax in self._parent.axes():\n if ax.name in self._fixed.keys():\n full_params.append(self._fixed[ax.name])\n else:\n full_params.append(given_params.pop(0))\n return self._parent(tuple(full_params))\n<|end_body_1|>\n", "class_docstring": "A grid that only gives access to a part of the parent grid.", "class_name": "SubGrid", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SubGrid:\n \"\"\"A grid that only gives access to a part of the parent grid.\"\"\"\n\n def __init__(self, parent: Grid, fixed: Dict[str, float], *args, **kwargs):\n \"\"\"Initializes a new sub grid. Args: parent: Parent grid. fixed: Dictionary with fixed axis_name->value pairs.\"\"\"\n <|body_0|>\n\n def __call__(self, params: Tuple) -> Any:\n \"\"\"Fetches the value for the given parameter set Args: params: Parameter set to catch value for. Returns: Grid value at given position.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._parent = parent\n self._fixed = {}\n axes: List[GridAxis] = []\n for ax in parent.axes():\n if ax.name in fixed.keys():\n self._fixed[ax.name] = fixed[ax.name]\n else:\n axes.append(ax)\n Grid.__init__(self, axes, *args, **kwargs)\n self.log.info('Initializing sub interpolator...')\n for ax, val in self._fixed.items():\n self.log.info('Fixed axis %s to value %f.', ax, val)\n<|end_body_0|>\n\n<|body_start_1|>\n given_params = list(params)\n full_params = []\n for ax in self._parent.axes():\n if ax.name in self._fixed.keys():\n full_params.append(self._fixed[ax.name])\n else:\n full_params.append(given_params.pop(0))\n return self._parent(tuple(full_params))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000229", "length_bytes": 1800, "license_type": "permissive", "methods": [{"docstring": "Initializes a new sub grid. Args: parent: Parent grid. fixed: Dictionary with fixed axis_name->value pairs.", "name": "__init__", "signature": "def __init__(self, parent: Grid, fixed: Dict[str, float], *args, **kwargs)"}, {"docstring": "Fetches the value for the given parameter set Args: params: Parameter set to catch value for. Returns: Grid value at given position.", "name": "__call__", "signature": "def __call__(self, params: Tuple) -> Any"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007455", "prompt": "Implement the Python class `SubGrid` described below.\n\nClass description:\nA grid that only gives access to a part of the parent grid.\n\nMethod signatures and docstrings:\n- def __init__(self, parent: Grid, fixed: Dict[str, float], *args, **kwargs): Initializes a new sub grid. Args: parent: Parent grid. fixed: Dictionary with fixed axis_name->value pairs.\n- def __call__(self, params: Tuple) -> Any: Fetches the value for the given parameter set Args: params: Parameter set to catch value for. Returns: Grid value at given position.", "prompted_full_text": "Implement the Python class `SubGrid` described below.\n\nClass description:\nA grid that only gives access to a part of the parent grid.\n\nMethod signatures and docstrings:\n- def __init__(self, parent: Grid, fixed: Dict[str, float], *args, **kwargs): Initializes a new sub grid. Args: parent: Parent grid. fixed: Dictionary with fixed axis_name->value pairs.\n- def __call__(self, params: Tuple) -> Any: Fetches the value for the given parameter set Args: params: Parameter set to catch value for. Returns: Grid value at given position.\n\n<|skeleton|>\nclass SubGrid:\n \"\"\"A grid that only gives access to a part of the parent grid.\"\"\"\n\n def __init__(self, parent: Grid, fixed: Dict[str, float], *args, **kwargs):\n \"\"\"Initializes a new sub grid. Args: parent: Parent grid. fixed: Dictionary with fixed axis_name->value pairs.\"\"\"\n <|body_0|>\n\n def __call__(self, params: Tuple) -> Any:\n \"\"\"Fetches the value for the given parameter set Args: params: Parameter set to catch value for. Returns: Grid value at given position.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._parent = parent\n self._fixed = {}\n axes: List[GridAxis] = []\n for ax in parent.axes():\n if ax.name in fixed.keys():\n self._fixed[ax.name] = fixed[ax.name]\n else:\n axes.append(ax)\n Grid.__init__(self, axes, *args, **kwargs)\n self.log.info('Initializing sub interpolator...')\n for ax, val in self._fixed.items():\n self.log.info('Fixed axis %s to value %f.', ax, val)\n<|end_body_0|>\n\n<|body_start_1|>\n given_params = list(params)\n full_params = []\n for ax in self._parent.axes():\n if ax.name in self._fixed.keys():\n full_params.append(self._fixed[ax.name])\n else:\n full_params.append(given_params.pop(0))\n return self._parent(tuple(full_params))\n<|end_body_1|>\n", "revision_id": "648eb1758e3744d9e3d6669edc4a0c4753559f17", "skeleton": "<|skeleton|>\nclass SubGrid:\n \"\"\"A grid that only gives access to a part of the parent grid.\"\"\"\n\n def __init__(self, parent: Grid, fixed: Dict[str, float], *args, **kwargs):\n \"\"\"Initializes a new sub grid. Args: parent: Parent grid. fixed: Dictionary with fixed axis_name->value pairs.\"\"\"\n <|body_0|>\n\n def __call__(self, params: Tuple) -> Any:\n \"\"\"Fetches the value for the given parameter set Args: params: Parameter set to catch value for. Returns: Grid value at given position.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SubGrid:\n \"\"\"A grid that only gives access to a part of the parent grid.\"\"\"\n\n def __init__(self, parent: Grid, fixed: Dict[str, float], *args, **kwargs):\n \"\"\"Initializes a new sub grid. Args: parent: Parent grid. fixed: Dictionary with fixed axis_name->value pairs.\"\"\"\n self._parent = parent\n self._fixed = {}\n axes: List[GridAxis] = []\n for ax in parent.axes():\n if ax.name in fixed.keys():\n self._fixed[ax.name] = fixed[ax.name]\n else:\n axes.append(ax)\n Grid.__init__(self, axes, *args, **kwargs)\n self.log.info('Initializing sub interpolator...')\n for ax, val in self._fixed.items():\n self.log.info('Fixed axis %s to value %f.', ax, val)\n\n def __call__(self, params: Tuple) -> Any:\n \"\"\"Fetches the value for the given parameter set Args: params: Parameter set to catch value for. Returns: Grid value at given position.\"\"\"\n given_params = list(params)\n full_params = []\n for ax in self._parent.axes():\n if ax.name in self._fixed.keys():\n full_params.append(self._fixed[ax.name])\n else:\n full_params.append(given_params.pop(0))\n return self._parent(tuple(full_params))\n", "source": "the_stack_v2_python_sparse", "source_path": "spexxy/grid/sub.py", "source_repo": "thusser/spexxy", "split": "test", "star_events_count": 4} {"blob_id": "4b145d551c66638757662eacfdf533667286a83e", "bodies": ["is_prime = [True] * max(n, 2)\nis_prime[0], is_prime[1] = (False, False)\nx = 2\nwhile x * x < n:\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n x += 1\nreturn sum(is_prime)", "is_prime = [True] * max(n, 2)\nis_prime[0], is_prime[1] = (False, False)\nfor x in range(2, int(n ** 0.5) + 1):\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\nreturn sum(is_prime)"], "bodies_text": "<|body_start_0|>\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n x = 2\n while x * x < n:\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n x += 1\n return sum(is_prime)\n<|end_body_0|>\n\n<|body_start_1|>\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n for x in range(2, int(n ** 0.5) + 1):\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n return sum(is_prime)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def countPrimes_v0(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n x = 2\n while x * x < n:\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n x += 1\n return sum(is_prime)\n<|end_body_0|>\n\n<|body_start_1|>\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n for x in range(2, int(n ** 0.5) + 1):\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n return sum(is_prime)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000230", "length_bytes": 2525, "license_type": "no_license", "methods": [{"docstring": ":type n: int :rtype: int", "name": "countPrimes", "signature": "def countPrimes(self, n)"}, {"docstring": ":type n: int :rtype: int", "name": "countPrimes_v0", "signature": "def countPrimes_v0(self, n)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020635", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countPrimes(self, n): :type n: int :rtype: int\n- def countPrimes_v0(self, n): :type n: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def countPrimes(self, n): :type n: int :rtype: int\n- def countPrimes_v0(self, n): :type n: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def countPrimes_v0(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n x = 2\n while x * x < n:\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n x += 1\n return sum(is_prime)\n<|end_body_0|>\n\n<|body_start_1|>\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n for x in range(2, int(n ** 0.5) + 1):\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n return sum(is_prime)\n<|end_body_1|>\n", "revision_id": "b5e09f24e8e96454dc99e20281e853fb9fcc85ed", "skeleton": "<|skeleton|>\nclass Solution:\n\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_0|>\n\n def countPrimes_v0(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def countPrimes(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n x = 2\n while x * x < n:\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n x += 1\n return sum(is_prime)\n\n def countPrimes_v0(self, n):\n \"\"\":type n: int :rtype: int\"\"\"\n is_prime = [True] * max(n, 2)\n is_prime[0], is_prime[1] = (False, False)\n for x in range(2, int(n ** 0.5) + 1):\n if is_prime[x]:\n p = x * x\n while p < n:\n is_prime[p] = False\n p += x\n return sum(is_prime)\n", "source": "the_stack_v2_python_sparse", "source_path": "python/204_Count_Primes.py", "source_repo": "Moby5/myleetcode", "split": "test", "star_events_count": 2} {"blob_id": "ffc826fc0d86adba2f53c0bac498d1857330abfe", "bodies": ["self.mobile_parkplayment(2)\nactual = self.driver.title\nself.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\nlogger.info('断言')\nsleep(1)\nself.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\nsleep(1)", "self.mobile_add_shopping()\nactual = self.driver.title\nself.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\nlogger.info('断言')\nsleep(2)\nself.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\nsleep(1)", "self.mobile_playment()\nactual = self.driver.title\nsleep(1)\nself.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\nlogger.info('断言')\nsleep(2)\nself.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\nsleep(1)", "self.mobile_qifuadd_shopping()\nactual = self.driver.title\nself.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\nlogger.info('断言')\nsleep(2)\nself.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\nsleep(1)", "self.mobile_playment()\nactual = self.driver.title\nself.assertEqual('支付宝', actual)\nlogger.info('断言成功')\nsleep(1)\nself.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\nsleep(1)\nself.driver.find_element_by_class_name('to_orderCenter_btn').click()\nsleep(2)\nself.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\nsleep(2)", "self.mobile_parkplayment(2)\nactual = self.driver.title\nself.assertEqual('支付宝', actual)\nlogger.info('断言成功')\nself.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\nsleep(1)\nself.driver.find_element_by_class_name('to_orderCenter_btn').click()\nsleep(1)\nself.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\nsleep(1)"], "bodies_text": "<|body_start_0|>\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(1)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_0|>\n\n<|body_start_1|>\n self.mobile_add_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_1|>\n\n<|body_start_2|>\n self.mobile_playment()\n actual = self.driver.title\n sleep(1)\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_2|>\n\n<|body_start_3|>\n self.mobile_qifuadd_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_3|>\n\n<|body_start_4|>\n self.mobile_playment()\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n sleep(1)\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(2)\n<|end_body_4|>\n\n<|body_start_5|>\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(1)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(1)\n<|end_body_5|>\n", "class_docstring": "", "class_name": "Test_MobileZhifu", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test_MobileZhifu:\n\n def test_case01(self):\n \"\"\"官网移动直客购买园区服务0元套餐 :return:\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"官网移动直客园区服务0元套餐加入购物车进行购买 :return:\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"官网移动直客购买企服商城0元套餐 :return:\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"官网移动直客企服商城0元套餐加入购物车进行购买 :return:\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"官网移动直客购买企服商城非0元套餐 :return:\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"官网移动直客购买园区服务非0元套餐 :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(1)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_0|>\n\n<|body_start_1|>\n self.mobile_add_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_1|>\n\n<|body_start_2|>\n self.mobile_playment()\n actual = self.driver.title\n sleep(1)\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_2|>\n\n<|body_start_3|>\n self.mobile_qifuadd_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_3|>\n\n<|body_start_4|>\n self.mobile_playment()\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n sleep(1)\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(2)\n<|end_body_4|>\n\n<|body_start_5|>\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(1)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(1)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000231", "length_bytes": 3687, "license_type": "no_license", "methods": [{"docstring": "官网移动直客购买园区服务0元套餐 :return:", "name": "test_case01", "signature": "def test_case01(self)"}, {"docstring": "官网移动直客园区服务0元套餐加入购物车进行购买 :return:", "name": "test_case02", "signature": "def test_case02(self)"}, {"docstring": "官网移动直客购买企服商城0元套餐 :return:", "name": "test_case03", "signature": "def test_case03(self)"}, {"docstring": "官网移动直客企服商城0元套餐加入购物车进行购买 :return:", "name": "test_case04", "signature": "def test_case04(self)"}, {"docstring": "官网移动直客购买企服商城非0元套餐 :return:", "name": "test_case05", "signature": "def test_case05(self)"}, {"docstring": "官网移动直客购买园区服务非0元套餐 :return:", "name": "test_case06", "signature": "def test_case06(self)"}], "n_methods": 6, "prompt": "Implement the Python class `Test_MobileZhifu` described below.\n\nClass description:\nImplement the Test_MobileZhifu class.\n\nMethod signatures and docstrings:\n- def test_case01(self): 官网移动直客购买园区服务0元套餐 :return:\n- def test_case02(self): 官网移动直客园区服务0元套餐加入购物车进行购买 :return:\n- def test_case03(self): 官网移动直客购买企服商城0元套餐 :return:\n- def test_case04(self): 官网移动直客企服商城0元套餐加入购物车进行购买 :return:\n- def test_case05(self): 官网移动直客购买企服商城非0元套餐 :return:\n- def test_case06(self): 官网移动直客购买园区服务非0元套餐 :return:", "prompted_full_text": "Implement the Python class `Test_MobileZhifu` described below.\n\nClass description:\nImplement the Test_MobileZhifu class.\n\nMethod signatures and docstrings:\n- def test_case01(self): 官网移动直客购买园区服务0元套餐 :return:\n- def test_case02(self): 官网移动直客园区服务0元套餐加入购物车进行购买 :return:\n- def test_case03(self): 官网移动直客购买企服商城0元套餐 :return:\n- def test_case04(self): 官网移动直客企服商城0元套餐加入购物车进行购买 :return:\n- def test_case05(self): 官网移动直客购买企服商城非0元套餐 :return:\n- def test_case06(self): 官网移动直客购买园区服务非0元套餐 :return:\n\n<|skeleton|>\nclass Test_MobileZhifu:\n\n def test_case01(self):\n \"\"\"官网移动直客购买园区服务0元套餐 :return:\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"官网移动直客园区服务0元套餐加入购物车进行购买 :return:\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"官网移动直客购买企服商城0元套餐 :return:\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"官网移动直客企服商城0元套餐加入购物车进行购买 :return:\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"官网移动直客购买企服商城非0元套餐 :return:\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"官网移动直客购买园区服务非0元套餐 :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(1)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_0|>\n\n<|body_start_1|>\n self.mobile_add_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_1|>\n\n<|body_start_2|>\n self.mobile_playment()\n actual = self.driver.title\n sleep(1)\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_2|>\n\n<|body_start_3|>\n self.mobile_qifuadd_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n<|end_body_3|>\n\n<|body_start_4|>\n self.mobile_playment()\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n sleep(1)\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(2)\n<|end_body_4|>\n\n<|body_start_5|>\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(1)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(1)\n<|end_body_5|>\n", "revision_id": "cf92e8e81ceb5cb67217bf36993cf94fe470fd0b", "skeleton": "<|skeleton|>\nclass Test_MobileZhifu:\n\n def test_case01(self):\n \"\"\"官网移动直客购买园区服务0元套餐 :return:\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"官网移动直客园区服务0元套餐加入购物车进行购买 :return:\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"官网移动直客购买企服商城0元套餐 :return:\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"官网移动直客企服商城0元套餐加入购物车进行购买 :return:\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"官网移动直客购买企服商城非0元套餐 :return:\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"官网移动直客购买园区服务非0元套餐 :return:\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Test_MobileZhifu:\n def test_case01(self):\n \"\"\"官网移动直客购买园区服务0元套餐 :return:\"\"\"\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(1)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n\n def test_case02(self):\n \"\"\"官网移动直客园区服务0元套餐加入购物车进行购买 :return:\"\"\"\n self.mobile_add_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n\n def test_case03(self):\n \"\"\"官网移动直客购买企服商城0元套餐 :return:\"\"\"\n self.mobile_playment()\n actual = self.driver.title\n sleep(1)\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n\n def test_case04(self):\n \"\"\"官网移动直客企服商城0元套餐加入购物车进行购买 :return:\"\"\"\n self.mobile_qifuadd_shopping()\n actual = self.driver.title\n self.assertEqual('支付成功-上海注册公司_企业服务平台_代理记账_财税服务_税务筹划_工商变更_人事社保', actual)\n logger.info('断言')\n sleep(2)\n self.driver.find_element_by_xpath('//a[text() = \"订单中心\"]').click()\n sleep(1)\n\n def test_case05(self):\n \"\"\"官网移动直客购买企服商城非0元套餐 :return:\"\"\"\n self.mobile_playment()\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n sleep(1)\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(2)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(2)\n\n def test_case06(self):\n \"\"\"官网移动直客购买园区服务非0元套餐 :return:\"\"\"\n self.mobile_parkplayment(2)\n actual = self.driver.title\n self.assertEqual('支付宝', actual)\n logger.info('断言成功')\n self.driver.find_element_by_xpath('/html/body/div[5]/div/div[1]/img').click()\n sleep(1)\n self.driver.find_element_by_class_name('to_orderCenter_btn').click()\n sleep(1)\n self.driver.find_element_by_xpath('//*[@id=\"layui-layer1\"]/div[2]/section/section/button[1]').click()\n sleep(1)\n", "source": "the_stack_v2_python_sparse", "source_path": "hhr/case/mobile_qiantai/test_mobilezhifu.py", "source_repo": "aixin2000/Test_Scripts", "split": "test", "star_events_count": 0} {"blob_id": "2062385330762f6cc991bd0670ab5e036dacd831", "bodies": ["tests_startup = getattr(settings, 'TESTING_MODE', False)\nae_autostart = getattr(settings, 'DICOM_AE_AUTOSTART', True)\nae_exists = self.application_entity is not None\nae_missing = ae_autostart and (not (tests_startup or ae_exists))\nif ae_missing:\n self.application_entity = self.create_application_entity()\n self.start_servers()", "from django_dicom.models.networking import messages as networking_messages\nlogger = logging.getLogger('data.dicom.networking')\nae_title = get_application_entity_title()\nstart_message = networking_messages.APPLICATION_ENTITY_START.format(title=ae_title)\nlogger.info(start_message)\napplication_entity = AE(ae_title=ae_title)\nend_message = networking_messages.APPLICATION_ENTITY_SUCCESS\nlogger.info(end_message)\nif allow_echo:\n application_entity.add_supported_context(VerificationSOPClass, ALL_TRANSFER_SYNTAXES[:])\n logger.debug(networking_messages.C_ECHO_ENABLED)\napplication_entity.maximum_pdu_size = maximum_pdu_size\nif maximum_pdu_size != 0:\n message = networking_messages.PDU_LIMIT_CONFIGURATION.format(maximum_pdu_size=maximum_pdu_size)\n logger.debug(message)\nreturn application_entity", "StorageServiceClassProvider = self.get_model('StorageServiceClassProvider')\ntry:\n StorageServiceClassProvider.objects.start_servers()\nexcept ProgrammingError:\n pass"], "bodies_text": "<|body_start_0|>\n tests_startup = getattr(settings, 'TESTING_MODE', False)\n ae_autostart = getattr(settings, 'DICOM_AE_AUTOSTART', True)\n ae_exists = self.application_entity is not None\n ae_missing = ae_autostart and (not (tests_startup or ae_exists))\n if ae_missing:\n self.application_entity = self.create_application_entity()\n self.start_servers()\n<|end_body_0|>\n\n<|body_start_1|>\n from django_dicom.models.networking import messages as networking_messages\n logger = logging.getLogger('data.dicom.networking')\n ae_title = get_application_entity_title()\n start_message = networking_messages.APPLICATION_ENTITY_START.format(title=ae_title)\n logger.info(start_message)\n application_entity = AE(ae_title=ae_title)\n end_message = networking_messages.APPLICATION_ENTITY_SUCCESS\n logger.info(end_message)\n if allow_echo:\n application_entity.add_supported_context(VerificationSOPClass, ALL_TRANSFER_SYNTAXES[:])\n logger.debug(networking_messages.C_ECHO_ENABLED)\n application_entity.maximum_pdu_size = maximum_pdu_size\n if maximum_pdu_size != 0:\n message = networking_messages.PDU_LIMIT_CONFIGURATION.format(maximum_pdu_size=maximum_pdu_size)\n logger.debug(message)\n return application_entity\n<|end_body_1|>\n\n<|body_start_2|>\n StorageServiceClassProvider = self.get_model('StorageServiceClassProvider')\n try:\n StorageServiceClassProvider.objects.start_servers()\n except ProgrammingError:\n pass\n<|end_body_2|>\n", "class_docstring": ":mod:`django_dicom`'s app configuration.", "class_name": "DjangoDicomConfig", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DjangoDicomConfig:\n \"\"\":mod:`django_dicom`'s app configuration.\"\"\"\n\n def ready(self):\n \"\"\"Overrides :func:`~django.apps.AppConfig.ready` to run code when Django starts. Warning ------- If the application is served with gunicorn using multiple workers, :func:`ready` is executed multiple times and causes server instantiation to raise *[Errno 98] Address already in use*. In case the application is meant to be served using multiple workers, the DICOM application entity instantiation needs to be revised.\"\"\"\n <|body_0|>\n\n def create_application_entity(self, allow_echo: bool=True, maximum_pdu_size: int=0) -> AE:\n \"\"\"Returns an :class:`~pynetdicom.ae.ApplicationEntity` instance. Parameters ---------- allow_echo : bool Whether to enable C-ECHO request handling or not, default is True maximum_pdu_size : int Maximal PDU size. By default, overrides pynetdicom's default setting to 0 (unlimited) Returns ------- AE DICOM networking application entity\"\"\"\n <|body_1|>\n\n def start_servers(self):\n \"\"\"Creates the :class:`pynetdicom.transport.ThreadedAssociationServer` instances to manage requests from storage service class users. See Also -------- * :class:`~pynetdicom.transport.ThreadedAssociationServer` * :attr:`~pynetdicom.ae.ApplicationEntity._servers` * :func:`create_application_entity` * :attr:`application_entity`\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tests_startup = getattr(settings, 'TESTING_MODE', False)\n ae_autostart = getattr(settings, 'DICOM_AE_AUTOSTART', True)\n ae_exists = self.application_entity is not None\n ae_missing = ae_autostart and (not (tests_startup or ae_exists))\n if ae_missing:\n self.application_entity = self.create_application_entity()\n self.start_servers()\n<|end_body_0|>\n\n<|body_start_1|>\n from django_dicom.models.networking import messages as networking_messages\n logger = logging.getLogger('data.dicom.networking')\n ae_title = get_application_entity_title()\n start_message = networking_messages.APPLICATION_ENTITY_START.format(title=ae_title)\n logger.info(start_message)\n application_entity = AE(ae_title=ae_title)\n end_message = networking_messages.APPLICATION_ENTITY_SUCCESS\n logger.info(end_message)\n if allow_echo:\n application_entity.add_supported_context(VerificationSOPClass, ALL_TRANSFER_SYNTAXES[:])\n logger.debug(networking_messages.C_ECHO_ENABLED)\n application_entity.maximum_pdu_size = maximum_pdu_size\n if maximum_pdu_size != 0:\n message = networking_messages.PDU_LIMIT_CONFIGURATION.format(maximum_pdu_size=maximum_pdu_size)\n logger.debug(message)\n return application_entity\n<|end_body_1|>\n\n<|body_start_2|>\n StorageServiceClassProvider = self.get_model('StorageServiceClassProvider')\n try:\n StorageServiceClassProvider.objects.start_servers()\n except ProgrammingError:\n pass\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000232", "length_bytes": 4437, "license_type": "permissive", "methods": [{"docstring": "Overrides :func:`~django.apps.AppConfig.ready` to run code when Django starts. Warning ------- If the application is served with gunicorn using multiple workers, :func:`ready` is executed multiple times and causes server instantiation to raise *[Errno 98] Address already in use*. In case the application is meant to be served using multiple workers, the DICOM application entity instantiation needs to be revised.", "name": "ready", "signature": "def ready(self)"}, {"docstring": "Returns an :class:`~pynetdicom.ae.ApplicationEntity` instance. Parameters ---------- allow_echo : bool Whether to enable C-ECHO request handling or not, default is True maximum_pdu_size : int Maximal PDU size. By default, overrides pynetdicom's default setting to 0 (unlimited) Returns ------- AE DICOM networking application entity", "name": "create_application_entity", "signature": "def create_application_entity(self, allow_echo: bool=True, maximum_pdu_size: int=0) -> AE"}, {"docstring": "Creates the :class:`pynetdicom.transport.ThreadedAssociationServer` instances to manage requests from storage service class users. See Also -------- * :class:`~pynetdicom.transport.ThreadedAssociationServer` * :attr:`~pynetdicom.ae.ApplicationEntity._servers` * :func:`create_application_entity` * :attr:`application_entity`", "name": "start_servers", "signature": "def start_servers(self)"}], "n_methods": 3, "prompt": "Implement the Python class `DjangoDicomConfig` described below.\n\nClass description:\n:mod:`django_dicom`'s app configuration.\n\nMethod signatures and docstrings:\n- def ready(self): Overrides :func:`~django.apps.AppConfig.ready` to run code when Django starts. Warning ------- If the application is served with gunicorn using multiple workers, :func:`ready` is executed multiple times and causes server instantiation to raise *[Errno 98] Address already in use*. In case the application is meant to be served using multiple workers, the DICOM application entity instantiation needs to be revised.\n- def create_application_entity(self, allow_echo: bool=True, maximum_pdu_size: int=0) -> AE: Returns an :class:`~pynetdicom.ae.ApplicationEntity` instance. Parameters ---------- allow_echo : bool Whether to enable C-ECHO request handling or not, default is True maximum_pdu_size : int Maximal PDU size. By default, overrides pynetdicom's default setting to 0 (unlimited) Returns ------- AE DICOM networking application entity\n- def start_servers(self): Creates the :class:`pynetdicom.transport.ThreadedAssociationServer` instances to manage requests from storage service class users. See Also -------- * :class:`~pynetdicom.transport.ThreadedAssociationServer` * :attr:`~pynetdicom.ae.ApplicationEntity._servers` * :func:`create_application_entity` * :attr:`application_entity`", "prompted_full_text": "Implement the Python class `DjangoDicomConfig` described below.\n\nClass description:\n:mod:`django_dicom`'s app configuration.\n\nMethod signatures and docstrings:\n- def ready(self): Overrides :func:`~django.apps.AppConfig.ready` to run code when Django starts. Warning ------- If the application is served with gunicorn using multiple workers, :func:`ready` is executed multiple times and causes server instantiation to raise *[Errno 98] Address already in use*. In case the application is meant to be served using multiple workers, the DICOM application entity instantiation needs to be revised.\n- def create_application_entity(self, allow_echo: bool=True, maximum_pdu_size: int=0) -> AE: Returns an :class:`~pynetdicom.ae.ApplicationEntity` instance. Parameters ---------- allow_echo : bool Whether to enable C-ECHO request handling or not, default is True maximum_pdu_size : int Maximal PDU size. By default, overrides pynetdicom's default setting to 0 (unlimited) Returns ------- AE DICOM networking application entity\n- def start_servers(self): Creates the :class:`pynetdicom.transport.ThreadedAssociationServer` instances to manage requests from storage service class users. See Also -------- * :class:`~pynetdicom.transport.ThreadedAssociationServer` * :attr:`~pynetdicom.ae.ApplicationEntity._servers` * :func:`create_application_entity` * :attr:`application_entity`\n\n<|skeleton|>\nclass DjangoDicomConfig:\n \"\"\":mod:`django_dicom`'s app configuration.\"\"\"\n\n def ready(self):\n \"\"\"Overrides :func:`~django.apps.AppConfig.ready` to run code when Django starts. Warning ------- If the application is served with gunicorn using multiple workers, :func:`ready` is executed multiple times and causes server instantiation to raise *[Errno 98] Address already in use*. In case the application is meant to be served using multiple workers, the DICOM application entity instantiation needs to be revised.\"\"\"\n <|body_0|>\n\n def create_application_entity(self, allow_echo: bool=True, maximum_pdu_size: int=0) -> AE:\n \"\"\"Returns an :class:`~pynetdicom.ae.ApplicationEntity` instance. Parameters ---------- allow_echo : bool Whether to enable C-ECHO request handling or not, default is True maximum_pdu_size : int Maximal PDU size. By default, overrides pynetdicom's default setting to 0 (unlimited) Returns ------- AE DICOM networking application entity\"\"\"\n <|body_1|>\n\n def start_servers(self):\n \"\"\"Creates the :class:`pynetdicom.transport.ThreadedAssociationServer` instances to manage requests from storage service class users. See Also -------- * :class:`~pynetdicom.transport.ThreadedAssociationServer` * :attr:`~pynetdicom.ae.ApplicationEntity._servers` * :func:`create_application_entity` * :attr:`application_entity`\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tests_startup = getattr(settings, 'TESTING_MODE', False)\n ae_autostart = getattr(settings, 'DICOM_AE_AUTOSTART', True)\n ae_exists = self.application_entity is not None\n ae_missing = ae_autostart and (not (tests_startup or ae_exists))\n if ae_missing:\n self.application_entity = self.create_application_entity()\n self.start_servers()\n<|end_body_0|>\n\n<|body_start_1|>\n from django_dicom.models.networking import messages as networking_messages\n logger = logging.getLogger('data.dicom.networking')\n ae_title = get_application_entity_title()\n start_message = networking_messages.APPLICATION_ENTITY_START.format(title=ae_title)\n logger.info(start_message)\n application_entity = AE(ae_title=ae_title)\n end_message = networking_messages.APPLICATION_ENTITY_SUCCESS\n logger.info(end_message)\n if allow_echo:\n application_entity.add_supported_context(VerificationSOPClass, ALL_TRANSFER_SYNTAXES[:])\n logger.debug(networking_messages.C_ECHO_ENABLED)\n application_entity.maximum_pdu_size = maximum_pdu_size\n if maximum_pdu_size != 0:\n message = networking_messages.PDU_LIMIT_CONFIGURATION.format(maximum_pdu_size=maximum_pdu_size)\n logger.debug(message)\n return application_entity\n<|end_body_1|>\n\n<|body_start_2|>\n StorageServiceClassProvider = self.get_model('StorageServiceClassProvider')\n try:\n StorageServiceClassProvider.objects.start_servers()\n except ProgrammingError:\n pass\n<|end_body_2|>\n", "revision_id": "cd49a08b1f0efbbb8e05b20ec03d0afc3789cf6f", "skeleton": "<|skeleton|>\nclass DjangoDicomConfig:\n \"\"\":mod:`django_dicom`'s app configuration.\"\"\"\n\n def ready(self):\n \"\"\"Overrides :func:`~django.apps.AppConfig.ready` to run code when Django starts. Warning ------- If the application is served with gunicorn using multiple workers, :func:`ready` is executed multiple times and causes server instantiation to raise *[Errno 98] Address already in use*. In case the application is meant to be served using multiple workers, the DICOM application entity instantiation needs to be revised.\"\"\"\n <|body_0|>\n\n def create_application_entity(self, allow_echo: bool=True, maximum_pdu_size: int=0) -> AE:\n \"\"\"Returns an :class:`~pynetdicom.ae.ApplicationEntity` instance. Parameters ---------- allow_echo : bool Whether to enable C-ECHO request handling or not, default is True maximum_pdu_size : int Maximal PDU size. By default, overrides pynetdicom's default setting to 0 (unlimited) Returns ------- AE DICOM networking application entity\"\"\"\n <|body_1|>\n\n def start_servers(self):\n \"\"\"Creates the :class:`pynetdicom.transport.ThreadedAssociationServer` instances to manage requests from storage service class users. See Also -------- * :class:`~pynetdicom.transport.ThreadedAssociationServer` * :attr:`~pynetdicom.ae.ApplicationEntity._servers` * :func:`create_application_entity` * :attr:`application_entity`\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DjangoDicomConfig:\n \"\"\":mod:`django_dicom`'s app configuration.\"\"\"\n\n def ready(self):\n \"\"\"Overrides :func:`~django.apps.AppConfig.ready` to run code when Django starts. Warning ------- If the application is served with gunicorn using multiple workers, :func:`ready` is executed multiple times and causes server instantiation to raise *[Errno 98] Address already in use*. In case the application is meant to be served using multiple workers, the DICOM application entity instantiation needs to be revised.\"\"\"\n tests_startup = getattr(settings, 'TESTING_MODE', False)\n ae_autostart = getattr(settings, 'DICOM_AE_AUTOSTART', True)\n ae_exists = self.application_entity is not None\n ae_missing = ae_autostart and (not (tests_startup or ae_exists))\n if ae_missing:\n self.application_entity = self.create_application_entity()\n self.start_servers()\n\n def create_application_entity(self, allow_echo: bool=True, maximum_pdu_size: int=0) -> AE:\n \"\"\"Returns an :class:`~pynetdicom.ae.ApplicationEntity` instance. Parameters ---------- allow_echo : bool Whether to enable C-ECHO request handling or not, default is True maximum_pdu_size : int Maximal PDU size. By default, overrides pynetdicom's default setting to 0 (unlimited) Returns ------- AE DICOM networking application entity\"\"\"\n from django_dicom.models.networking import messages as networking_messages\n logger = logging.getLogger('data.dicom.networking')\n ae_title = get_application_entity_title()\n start_message = networking_messages.APPLICATION_ENTITY_START.format(title=ae_title)\n logger.info(start_message)\n application_entity = AE(ae_title=ae_title)\n end_message = networking_messages.APPLICATION_ENTITY_SUCCESS\n logger.info(end_message)\n if allow_echo:\n application_entity.add_supported_context(VerificationSOPClass, ALL_TRANSFER_SYNTAXES[:])\n logger.debug(networking_messages.C_ECHO_ENABLED)\n application_entity.maximum_pdu_size = maximum_pdu_size\n if maximum_pdu_size != 0:\n message = networking_messages.PDU_LIMIT_CONFIGURATION.format(maximum_pdu_size=maximum_pdu_size)\n logger.debug(message)\n return application_entity\n\n def start_servers(self):\n \"\"\"Creates the :class:`pynetdicom.transport.ThreadedAssociationServer` instances to manage requests from storage service class users. See Also -------- * :class:`~pynetdicom.transport.ThreadedAssociationServer` * :attr:`~pynetdicom.ae.ApplicationEntity._servers` * :func:`create_application_entity` * :attr:`application_entity`\"\"\"\n StorageServiceClassProvider = self.get_model('StorageServiceClassProvider')\n try:\n StorageServiceClassProvider.objects.start_servers()\n except ProgrammingError:\n pass\n", "source": "the_stack_v2_python_sparse", "source_path": "django_dicom/apps.py", "source_repo": "ZviBaratz/django_dicom", "split": "test", "star_events_count": 8} {"blob_id": "5e8c886594c4e4972428642f829bb38dca008a98", "bodies": ["if root == None:\n return\nvisit(root)\nself.root = root", "y = target\nr = []\nr.append(y)\nwhile True:\n if y & 1:\n t = (y - 1) / 2\n else:\n t = (y - 2) / 2\n if t < 0:\n break\n r.append(t)\n y = t\nreturn f(self.root, r[::-1], 0, len(r))"], "bodies_text": "<|body_start_0|>\n if root == None:\n return\n visit(root)\n self.root = root\n<|end_body_0|>\n\n<|body_start_1|>\n y = target\n r = []\n r.append(y)\n while True:\n if y & 1:\n t = (y - 1) / 2\n else:\n t = (y - 2) / 2\n if t < 0:\n break\n r.append(t)\n y = t\n return f(self.root, r[::-1], 0, len(r))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "FindElements", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FindElements:\n\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n <|body_0|>\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root == None:\n return\n visit(root)\n self.root = root\n<|end_body_0|>\n\n<|body_start_1|>\n y = target\n r = []\n r.append(y)\n while True:\n if y & 1:\n t = (y - 1) / 2\n else:\n t = (y - 2) / 2\n if t < 0:\n break\n r.append(t)\n y = t\n return f(self.root, r[::-1], 0, len(r))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000233", "length_bytes": 1350, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode", "name": "__init__", "signature": "def __init__(self, root)"}, {"docstring": ":type target: int :rtype: bool", "name": "find", "signature": "def find(self, target)"}], "n_methods": 2, "prompt": "Implement the Python class `FindElements` described below.\n\nClass description:\nImplement the FindElements class.\n\nMethod signatures and docstrings:\n- def __init__(self, root): :type root: TreeNode\n- def find(self, target): :type target: int :rtype: bool", "prompted_full_text": "Implement the Python class `FindElements` described below.\n\nClass description:\nImplement the FindElements class.\n\nMethod signatures and docstrings:\n- def __init__(self, root): :type root: TreeNode\n- def find(self, target): :type target: int :rtype: bool\n\n<|skeleton|>\nclass FindElements:\n\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n <|body_0|>\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root == None:\n return\n visit(root)\n self.root = root\n<|end_body_0|>\n\n<|body_start_1|>\n y = target\n r = []\n r.append(y)\n while True:\n if y & 1:\n t = (y - 1) / 2\n else:\n t = (y - 2) / 2\n if t < 0:\n break\n r.append(t)\n y = t\n return f(self.root, r[::-1], 0, len(r))\n<|end_body_1|>\n", "revision_id": "fd6c8082f81bcd9eda084b347c77fd570cfbee4a", "skeleton": "<|skeleton|>\nclass FindElements:\n\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n <|body_0|>\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FindElements:\n def __init__(self, root):\n \"\"\":type root: TreeNode\"\"\"\n if root == None:\n return\n visit(root)\n self.root = root\n\n def find(self, target):\n \"\"\":type target: int :rtype: bool\"\"\"\n y = target\n r = []\n r.append(y)\n while True:\n if y & 1:\n t = (y - 1) / 2\n else:\n t = (y - 2) / 2\n if t < 0:\n break\n r.append(t)\n y = t\n return f(self.root, r[::-1], 0, len(r))\n", "source": "the_stack_v2_python_sparse", "source_path": "problems1k/1261/test.py", "source_repo": "neuxxm/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "9ecaa0a028b67d3592c31256e166954b3552f239", "bodies": ["try:\n self.bluetooth_port = serial.Serial('/dev/rfcomm0', 9600)\n self.rover_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.rover_socket.connect((ip, port))\n self.messagesListenerThread = threading.Thread(target=self.handle_incoming_messages)\n self.messagesListenerThread.start()\n self.messagesListenerThread.join()\nfinally:\n self.cleanUp()", "while True:\n message = NetworkHandler().receive(self.rover_socket)\n if message:\n print(message)\n self.bluetooth_port.write(message)\n else:\n return", "try:\n if self.rover_socket is not None:\n self.rover_socket.close()\n if self.bluetooth_port is not None:\n self.bluetooth_port.close()\nexcept Exception as e:\n print(e)"], "bodies_text": "<|body_start_0|>\n try:\n self.bluetooth_port = serial.Serial('/dev/rfcomm0', 9600)\n self.rover_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.rover_socket.connect((ip, port))\n self.messagesListenerThread = threading.Thread(target=self.handle_incoming_messages)\n self.messagesListenerThread.start()\n self.messagesListenerThread.join()\n finally:\n self.cleanUp()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n message = NetworkHandler().receive(self.rover_socket)\n if message:\n print(message)\n self.bluetooth_port.write(message)\n else:\n return\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if self.rover_socket is not None:\n self.rover_socket.close()\n if self.bluetooth_port is not None:\n self.bluetooth_port.close()\n except Exception as e:\n print(e)\n<|end_body_2|>\n", "class_docstring": "A Class for handling communication with the Server operating externally.", "class_name": "Rover", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Rover:\n \"\"\"A Class for handling communication with the Server operating externally.\"\"\"\n\n def start(self, ip='192.168.100.14', port=6909):\n \"\"\"Kickstarts the rover. - Creates socket - Sends connection request to Server - Once connected, performs the following in parallel: - calls listen()\"\"\"\n <|body_0|>\n\n def handle_incoming_messages(self):\n \"\"\"Listens for messages from the Rover.\"\"\"\n <|body_1|>\n\n def cleanUp(self):\n \"\"\"Closes Connections.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.bluetooth_port = serial.Serial('/dev/rfcomm0', 9600)\n self.rover_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.rover_socket.connect((ip, port))\n self.messagesListenerThread = threading.Thread(target=self.handle_incoming_messages)\n self.messagesListenerThread.start()\n self.messagesListenerThread.join()\n finally:\n self.cleanUp()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n message = NetworkHandler().receive(self.rover_socket)\n if message:\n print(message)\n self.bluetooth_port.write(message)\n else:\n return\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if self.rover_socket is not None:\n self.rover_socket.close()\n if self.bluetooth_port is not None:\n self.bluetooth_port.close()\n except Exception as e:\n print(e)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000234", "length_bytes": 2008, "license_type": "no_license", "methods": [{"docstring": "Kickstarts the rover. - Creates socket - Sends connection request to Server - Once connected, performs the following in parallel: - calls listen()", "name": "start", "signature": "def start(self, ip='192.168.100.14', port=6909)"}, {"docstring": "Listens for messages from the Rover.", "name": "handle_incoming_messages", "signature": "def handle_incoming_messages(self)"}, {"docstring": "Closes Connections.", "name": "cleanUp", "signature": "def cleanUp(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006572", "prompt": "Implement the Python class `Rover` described below.\n\nClass description:\nA Class for handling communication with the Server operating externally.\n\nMethod signatures and docstrings:\n- def start(self, ip='192.168.100.14', port=6909): Kickstarts the rover. - Creates socket - Sends connection request to Server - Once connected, performs the following in parallel: - calls listen()\n- def handle_incoming_messages(self): Listens for messages from the Rover.\n- def cleanUp(self): Closes Connections.", "prompted_full_text": "Implement the Python class `Rover` described below.\n\nClass description:\nA Class for handling communication with the Server operating externally.\n\nMethod signatures and docstrings:\n- def start(self, ip='192.168.100.14', port=6909): Kickstarts the rover. - Creates socket - Sends connection request to Server - Once connected, performs the following in parallel: - calls listen()\n- def handle_incoming_messages(self): Listens for messages from the Rover.\n- def cleanUp(self): Closes Connections.\n\n<|skeleton|>\nclass Rover:\n \"\"\"A Class for handling communication with the Server operating externally.\"\"\"\n\n def start(self, ip='192.168.100.14', port=6909):\n \"\"\"Kickstarts the rover. - Creates socket - Sends connection request to Server - Once connected, performs the following in parallel: - calls listen()\"\"\"\n <|body_0|>\n\n def handle_incoming_messages(self):\n \"\"\"Listens for messages from the Rover.\"\"\"\n <|body_1|>\n\n def cleanUp(self):\n \"\"\"Closes Connections.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.bluetooth_port = serial.Serial('/dev/rfcomm0', 9600)\n self.rover_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.rover_socket.connect((ip, port))\n self.messagesListenerThread = threading.Thread(target=self.handle_incoming_messages)\n self.messagesListenerThread.start()\n self.messagesListenerThread.join()\n finally:\n self.cleanUp()\n<|end_body_0|>\n\n<|body_start_1|>\n while True:\n message = NetworkHandler().receive(self.rover_socket)\n if message:\n print(message)\n self.bluetooth_port.write(message)\n else:\n return\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n if self.rover_socket is not None:\n self.rover_socket.close()\n if self.bluetooth_port is not None:\n self.bluetooth_port.close()\n except Exception as e:\n print(e)\n<|end_body_2|>\n", "revision_id": "562febd0a61dede41e114845f84265b8e2664d3a", "skeleton": "<|skeleton|>\nclass Rover:\n \"\"\"A Class for handling communication with the Server operating externally.\"\"\"\n\n def start(self, ip='192.168.100.14', port=6909):\n \"\"\"Kickstarts the rover. - Creates socket - Sends connection request to Server - Once connected, performs the following in parallel: - calls listen()\"\"\"\n <|body_0|>\n\n def handle_incoming_messages(self):\n \"\"\"Listens for messages from the Rover.\"\"\"\n <|body_1|>\n\n def cleanUp(self):\n \"\"\"Closes Connections.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Rover:\n \"\"\"A Class for handling communication with the Server operating externally.\"\"\"\n\n def start(self, ip='192.168.100.14', port=6909):\n \"\"\"Kickstarts the rover. - Creates socket - Sends connection request to Server - Once connected, performs the following in parallel: - calls listen()\"\"\"\n try:\n self.bluetooth_port = serial.Serial('/dev/rfcomm0', 9600)\n self.rover_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.rover_socket.connect((ip, port))\n self.messagesListenerThread = threading.Thread(target=self.handle_incoming_messages)\n self.messagesListenerThread.start()\n self.messagesListenerThread.join()\n finally:\n self.cleanUp()\n\n def handle_incoming_messages(self):\n \"\"\"Listens for messages from the Rover.\"\"\"\n while True:\n message = NetworkHandler().receive(self.rover_socket)\n if message:\n print(message)\n self.bluetooth_port.write(message)\n else:\n return\n\n def cleanUp(self):\n \"\"\"Closes Connections.\"\"\"\n try:\n if self.rover_socket is not None:\n self.rover_socket.close()\n if self.bluetooth_port is not None:\n self.bluetooth_port.close()\n except Exception as e:\n print(e)\n", "source": "the_stack_v2_python_sparse", "source_path": "rover/rover.py", "source_repo": "amm98d/scout-rover", "split": "test", "star_events_count": 2} {"blob_id": "6ea01fb2c31671e29aa7cc1a933729983fdf584b", "bodies": ["def json_request(*_args, **_kwargs):\n return {}\nself.mock(cleanup.net, 'json_request', json_request)\nself.failUnless(cleanup.exists('instance'))", "def json_request(*_args, **_kwargs):\n raise net.NotFoundError('404', 404, '404')\nself.mock(cleanup.net, 'json_request', json_request)\nself.failIf(cleanup.exists('instance'))", "def json_request(*_args, **_kwargs):\n raise net.AuthError('403', 403, '403')\nself.mock(cleanup.net, 'json_request', json_request)\nself.assertRaises(net.AuthError, cleanup.exists, 'instance')"], "bodies_text": "<|body_start_0|>\n def json_request(*_args, **_kwargs):\n return {}\n self.mock(cleanup.net, 'json_request', json_request)\n self.failUnless(cleanup.exists('instance'))\n<|end_body_0|>\n\n<|body_start_1|>\n def json_request(*_args, **_kwargs):\n raise net.NotFoundError('404', 404, '404')\n self.mock(cleanup.net, 'json_request', json_request)\n self.failIf(cleanup.exists('instance'))\n<|end_body_1|>\n\n<|body_start_2|>\n def json_request(*_args, **_kwargs):\n raise net.AuthError('403', 403, '403')\n self.mock(cleanup.net, 'json_request', json_request)\n self.assertRaises(net.AuthError, cleanup.exists, 'instance')\n<|end_body_2|>\n", "class_docstring": "Tests for cleanup.exists.", "class_name": "ExistsTest", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExistsTest:\n \"\"\"Tests for cleanup.exists.\"\"\"\n\n def test_exists(self):\n \"\"\"Ensures an existing entity can be detected.\"\"\"\n <|body_0|>\n\n def test_not_found(self):\n \"\"\"Ensures a non-existant entity can be detected.\"\"\"\n <|body_1|>\n\n def test_error(self):\n \"\"\"Ensures errors are surfaced.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def json_request(*_args, **_kwargs):\n return {}\n self.mock(cleanup.net, 'json_request', json_request)\n self.failUnless(cleanup.exists('instance'))\n<|end_body_0|>\n\n<|body_start_1|>\n def json_request(*_args, **_kwargs):\n raise net.NotFoundError('404', 404, '404')\n self.mock(cleanup.net, 'json_request', json_request)\n self.failIf(cleanup.exists('instance'))\n<|end_body_1|>\n\n<|body_start_2|>\n def json_request(*_args, **_kwargs):\n raise net.AuthError('403', 403, '403')\n self.mock(cleanup.net, 'json_request', json_request)\n self.assertRaises(net.AuthError, cleanup.exists, 'instance')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000235", "length_bytes": 29781, "license_type": "permissive", "methods": [{"docstring": "Ensures an existing entity can be detected.", "name": "test_exists", "signature": "def test_exists(self)"}, {"docstring": "Ensures a non-existant entity can be detected.", "name": "test_not_found", "signature": "def test_not_found(self)"}, {"docstring": "Ensures errors are surfaced.", "name": "test_error", "signature": "def test_error(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007492", "prompt": "Implement the Python class `ExistsTest` described below.\n\nClass description:\nTests for cleanup.exists.\n\nMethod signatures and docstrings:\n- def test_exists(self): Ensures an existing entity can be detected.\n- def test_not_found(self): Ensures a non-existant entity can be detected.\n- def test_error(self): Ensures errors are surfaced.", "prompted_full_text": "Implement the Python class `ExistsTest` described below.\n\nClass description:\nTests for cleanup.exists.\n\nMethod signatures and docstrings:\n- def test_exists(self): Ensures an existing entity can be detected.\n- def test_not_found(self): Ensures a non-existant entity can be detected.\n- def test_error(self): Ensures errors are surfaced.\n\n<|skeleton|>\nclass ExistsTest:\n \"\"\"Tests for cleanup.exists.\"\"\"\n\n def test_exists(self):\n \"\"\"Ensures an existing entity can be detected.\"\"\"\n <|body_0|>\n\n def test_not_found(self):\n \"\"\"Ensures a non-existant entity can be detected.\"\"\"\n <|body_1|>\n\n def test_error(self):\n \"\"\"Ensures errors are surfaced.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def json_request(*_args, **_kwargs):\n return {}\n self.mock(cleanup.net, 'json_request', json_request)\n self.failUnless(cleanup.exists('instance'))\n<|end_body_0|>\n\n<|body_start_1|>\n def json_request(*_args, **_kwargs):\n raise net.NotFoundError('404', 404, '404')\n self.mock(cleanup.net, 'json_request', json_request)\n self.failIf(cleanup.exists('instance'))\n<|end_body_1|>\n\n<|body_start_2|>\n def json_request(*_args, **_kwargs):\n raise net.AuthError('403', 403, '403')\n self.mock(cleanup.net, 'json_request', json_request)\n self.assertRaises(net.AuthError, cleanup.exists, 'instance')\n<|end_body_2|>\n", "revision_id": "0a4fdfc25f89833026be6a8b29c0a27b8f3c5fc4", "skeleton": "<|skeleton|>\nclass ExistsTest:\n \"\"\"Tests for cleanup.exists.\"\"\"\n\n def test_exists(self):\n \"\"\"Ensures an existing entity can be detected.\"\"\"\n <|body_0|>\n\n def test_not_found(self):\n \"\"\"Ensures a non-existant entity can be detected.\"\"\"\n <|body_1|>\n\n def test_error(self):\n \"\"\"Ensures errors are surfaced.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExistsTest:\n \"\"\"Tests for cleanup.exists.\"\"\"\n\n def test_exists(self):\n \"\"\"Ensures an existing entity can be detected.\"\"\"\n def json_request(*_args, **_kwargs):\n return {}\n self.mock(cleanup.net, 'json_request', json_request)\n self.failUnless(cleanup.exists('instance'))\n\n def test_not_found(self):\n \"\"\"Ensures a non-existant entity can be detected.\"\"\"\n def json_request(*_args, **_kwargs):\n raise net.NotFoundError('404', 404, '404')\n self.mock(cleanup.net, 'json_request', json_request)\n self.failIf(cleanup.exists('instance'))\n\n def test_error(self):\n \"\"\"Ensures errors are surfaced.\"\"\"\n def json_request(*_args, **_kwargs):\n raise net.AuthError('403', 403, '403')\n self.mock(cleanup.net, 'json_request', json_request)\n self.assertRaises(net.AuthError, cleanup.exists, 'instance')\n", "source": "the_stack_v2_python_sparse", "source_path": "appengine/gce-backend/cleanup_test.py", "source_repo": "Swift1313/luci-py", "split": "test", "star_events_count": 0} {"blob_id": "5b666f7761603ae3ea1a7d98e74174a1c3af2b3c", "bodies": ["self._text = text\nself._word = word\nself._occurrences = []\nidx = 0\nwhile idx < len(text):\n parse_end = min(idx + len(word), len(text))\n word_in_text = text[idx:parse_end]\n if word_in_text.lower() == word.lower():\n self._occurrences.append(idx)\n idx = parse_end\n else:\n idx += 1", "if n < 0 or n >= len(self._occurrences):\n return -1\nreturn self._occurrences[n]"], "bodies_text": "<|body_start_0|>\n self._text = text\n self._word = word\n self._occurrences = []\n idx = 0\n while idx < len(text):\n parse_end = min(idx + len(word), len(text))\n word_in_text = text[idx:parse_end]\n if word_in_text.lower() == word.lower():\n self._occurrences.append(idx)\n idx = parse_end\n else:\n idx += 1\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 0 or n >= len(self._occurrences):\n return -1\n return self._occurrences[n]\n<|end_body_1|>\n", "class_docstring": "Class to buffer the positions of occurrences for a word within some text. Recall that one of the applicabilities of Prototype Pattern is: Avoid the inherent cost of creating a new object in the standard way (using the 'new' operator), when it us prohibitively expensive for a given application. This is exactly the case. The construction of one object is expensive since it performs expensive searching operations in the initialization process. Thus if we want to get a new instance with the same attributes using the standard way (using the 'new') operator, we have to do the expensive searching operations again. Thus, we simply use Prototype Pattern, and clones a copy of the prototype every time", "class_name": "WordOccurrences", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WordOccurrences:\n \"\"\"Class to buffer the positions of occurrences for a word within some text. Recall that one of the applicabilities of Prototype Pattern is: Avoid the inherent cost of creating a new object in the standard way (using the 'new' operator), when it us prohibitively expensive for a given application. This is exactly the case. The construction of one object is expensive since it performs expensive searching operations in the initialization process. Thus if we want to get a new instance with the same attributes using the standard way (using the 'new') operator, we have to do the expensive searching operations again. Thus, we simply use Prototype Pattern, and clones a copy of the prototype every time\"\"\"\n\n def __init__(self, text: str, word: str):\n \"\"\"Constructor with parameter. :param text: str :param word: str\"\"\"\n <|body_0|>\n\n def get_nth_occurrence(self, n: int):\n \"\"\"Returns the position of word's n-th occurrence in text (zero-based). :param n: int :return: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._text = text\n self._word = word\n self._occurrences = []\n idx = 0\n while idx < len(text):\n parse_end = min(idx + len(word), len(text))\n word_in_text = text[idx:parse_end]\n if word_in_text.lower() == word.lower():\n self._occurrences.append(idx)\n idx = parse_end\n else:\n idx += 1\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 0 or n >= len(self._occurrences):\n return -1\n return self._occurrences[n]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000236", "length_bytes": 1872, "license_type": "permissive", "methods": [{"docstring": "Constructor with parameter. :param text: str :param word: str", "name": "__init__", "signature": "def __init__(self, text: str, word: str)"}, {"docstring": "Returns the position of word's n-th occurrence in text (zero-based). :param n: int :return: int", "name": "get_nth_occurrence", "signature": "def get_nth_occurrence(self, n: int)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041104", "prompt": "Implement the Python class `WordOccurrences` described below.\n\nClass description:\nClass to buffer the positions of occurrences for a word within some text. Recall that one of the applicabilities of Prototype Pattern is: Avoid the inherent cost of creating a new object in the standard way (using the 'new' operator), when it us prohibitively expensive for a given application. This is exactly the case. The construction of one object is expensive since it performs expensive searching operations in the initialization process. Thus if we want to get a new instance with the same attributes using the standard way (using the 'new') operator, we have to do the expensive searching operations again. Thus, we simply use Prototype Pattern, and clones a copy of the prototype every time\n\nMethod signatures and docstrings:\n- def __init__(self, text: str, word: str): Constructor with parameter. :param text: str :param word: str\n- def get_nth_occurrence(self, n: int): Returns the position of word's n-th occurrence in text (zero-based). :param n: int :return: int", "prompted_full_text": "Implement the Python class `WordOccurrences` described below.\n\nClass description:\nClass to buffer the positions of occurrences for a word within some text. Recall that one of the applicabilities of Prototype Pattern is: Avoid the inherent cost of creating a new object in the standard way (using the 'new' operator), when it us prohibitively expensive for a given application. This is exactly the case. The construction of one object is expensive since it performs expensive searching operations in the initialization process. Thus if we want to get a new instance with the same attributes using the standard way (using the 'new') operator, we have to do the expensive searching operations again. Thus, we simply use Prototype Pattern, and clones a copy of the prototype every time\n\nMethod signatures and docstrings:\n- def __init__(self, text: str, word: str): Constructor with parameter. :param text: str :param word: str\n- def get_nth_occurrence(self, n: int): Returns the position of word's n-th occurrence in text (zero-based). :param n: int :return: int\n\n<|skeleton|>\nclass WordOccurrences:\n \"\"\"Class to buffer the positions of occurrences for a word within some text. Recall that one of the applicabilities of Prototype Pattern is: Avoid the inherent cost of creating a new object in the standard way (using the 'new' operator), when it us prohibitively expensive for a given application. This is exactly the case. The construction of one object is expensive since it performs expensive searching operations in the initialization process. Thus if we want to get a new instance with the same attributes using the standard way (using the 'new') operator, we have to do the expensive searching operations again. Thus, we simply use Prototype Pattern, and clones a copy of the prototype every time\"\"\"\n\n def __init__(self, text: str, word: str):\n \"\"\"Constructor with parameter. :param text: str :param word: str\"\"\"\n <|body_0|>\n\n def get_nth_occurrence(self, n: int):\n \"\"\"Returns the position of word's n-th occurrence in text (zero-based). :param n: int :return: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._text = text\n self._word = word\n self._occurrences = []\n idx = 0\n while idx < len(text):\n parse_end = min(idx + len(word), len(text))\n word_in_text = text[idx:parse_end]\n if word_in_text.lower() == word.lower():\n self._occurrences.append(idx)\n idx = parse_end\n else:\n idx += 1\n<|end_body_0|>\n\n<|body_start_1|>\n if n < 0 or n >= len(self._occurrences):\n return -1\n return self._occurrences[n]\n<|end_body_1|>\n", "revision_id": "7a8167a85456b481aba15d5eee5a64b116b00adc", "skeleton": "<|skeleton|>\nclass WordOccurrences:\n \"\"\"Class to buffer the positions of occurrences for a word within some text. Recall that one of the applicabilities of Prototype Pattern is: Avoid the inherent cost of creating a new object in the standard way (using the 'new' operator), when it us prohibitively expensive for a given application. This is exactly the case. The construction of one object is expensive since it performs expensive searching operations in the initialization process. Thus if we want to get a new instance with the same attributes using the standard way (using the 'new') operator, we have to do the expensive searching operations again. Thus, we simply use Prototype Pattern, and clones a copy of the prototype every time\"\"\"\n\n def __init__(self, text: str, word: str):\n \"\"\"Constructor with parameter. :param text: str :param word: str\"\"\"\n <|body_0|>\n\n def get_nth_occurrence(self, n: int):\n \"\"\"Returns the position of word's n-th occurrence in text (zero-based). :param n: int :return: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WordOccurrences:\n \"\"\"Class to buffer the positions of occurrences for a word within some text. Recall that one of the applicabilities of Prototype Pattern is: Avoid the inherent cost of creating a new object in the standard way (using the 'new' operator), when it us prohibitively expensive for a given application. This is exactly the case. The construction of one object is expensive since it performs expensive searching operations in the initialization process. Thus if we want to get a new instance with the same attributes using the standard way (using the 'new') operator, we have to do the expensive searching operations again. Thus, we simply use Prototype Pattern, and clones a copy of the prototype every time\"\"\"\n\n def __init__(self, text: str, word: str):\n \"\"\"Constructor with parameter. :param text: str :param word: str\"\"\"\n self._text = text\n self._word = word\n self._occurrences = []\n idx = 0\n while idx < len(text):\n parse_end = min(idx + len(word), len(text))\n word_in_text = text[idx:parse_end]\n if word_in_text.lower() == word.lower():\n self._occurrences.append(idx)\n idx = parse_end\n else:\n idx += 1\n\n def get_nth_occurrence(self, n: int):\n \"\"\"Returns the position of word's n-th occurrence in text (zero-based). :param n: int :return: int\"\"\"\n if n < 0 or n >= len(self._occurrences):\n return -1\n return self._occurrences[n]\n", "source": "the_stack_v2_python_sparse", "source_path": "2-Creational Patterns/6-Prototype Pattern/WordOccurrences Example/Python/prototype.py", "source_repo": "Ziang-Lu/Design-Patterns", "split": "test", "star_events_count": 2} {"blob_id": "a086a6042c31672ffcb30e966e92cb087dbfa7f2", "bodies": ["self.X, self.y = (X, y)\nself.y['Gender'] = GenderWrap().fit_transform(self.y['Gender'])\nself.y['Age'] = AgeRangeWrap().fit_transform(self.y['Age'])\nif data is None:\n self.features = pd.read_csv('data/' + source + '/raw/features_fin.csv', encoding='ISO-8859-1', index_col=0)\nelse:\n self.features = data", "sq = SelectionWrap(selection)\nif type == 'Both':\n df = sq.fit_transform(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\nelse:\n df = sq.fit_transform(self.features, self.y[type])\nprint(df.shape)\nreturn df", "print(self.features.shape)\nsq = ExtractionWrap(selection)\ndf = sq.fit_transform(self.features)\nprint(df.shape)\nreturn df", "if type(selection) is TruncatedSVD:\n result = pd.concat([self.X, self.y, self.applyExtraction(selection)], axis=1)\nelse:\n result = pd.concat([self.X, self.y, self.applySelection(selection, mode)], axis=1)\nreturn result", "if mode == 'Both':\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\nelse:\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y[mode])\nmodel = SelectFromModel(lr, prefit=True)\nX_new = model.transform(self.features)\norgcol = self.features.columns\ncols = []\nprint(orgcol)\nprint(model.get_support())\nfor stat, labl in zip(model.get_support(), orgcol):\n print(stat, type(stat))\n if stat == True:\n cols.append(labl)\nprint(X_new.shape)\nprint(cols)\nresult = pd.DataFrame(data=X_new, columns=cols)\nreturn pd.concat([self.X, self.y, result], axis=1)"], "bodies_text": "<|body_start_0|>\n self.X, self.y = (X, y)\n self.y['Gender'] = GenderWrap().fit_transform(self.y['Gender'])\n self.y['Age'] = AgeRangeWrap().fit_transform(self.y['Age'])\n if data is None:\n self.features = pd.read_csv('data/' + source + '/raw/features_fin.csv', encoding='ISO-8859-1', index_col=0)\n else:\n self.features = data\n<|end_body_0|>\n\n<|body_start_1|>\n sq = SelectionWrap(selection)\n if type == 'Both':\n df = sq.fit_transform(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n df = sq.fit_transform(self.features, self.y[type])\n print(df.shape)\n return df\n<|end_body_1|>\n\n<|body_start_2|>\n print(self.features.shape)\n sq = ExtractionWrap(selection)\n df = sq.fit_transform(self.features)\n print(df.shape)\n return df\n<|end_body_2|>\n\n<|body_start_3|>\n if type(selection) is TruncatedSVD:\n result = pd.concat([self.X, self.y, self.applyExtraction(selection)], axis=1)\n else:\n result = pd.concat([self.X, self.y, self.applySelection(selection, mode)], axis=1)\n return result\n<|end_body_3|>\n\n<|body_start_4|>\n if mode == 'Both':\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y[mode])\n model = SelectFromModel(lr, prefit=True)\n X_new = model.transform(self.features)\n orgcol = self.features.columns\n cols = []\n print(orgcol)\n print(model.get_support())\n for stat, labl in zip(model.get_support(), orgcol):\n print(stat, type(stat))\n if stat == True:\n cols.append(labl)\n print(X_new.shape)\n print(cols)\n result = pd.DataFrame(data=X_new, columns=cols)\n return pd.concat([self.X, self.y, result], axis=1)\n<|end_body_4|>\n", "class_docstring": "Applies dimension reduction to data", "class_name": "Feature", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Feature:\n \"\"\"Applies dimension reduction to data\"\"\"\n\n def __init__(self, X, y, source, data=None):\n \"\"\":param X: text data :param y: classes (gender and age) :param source: twitter, facebook, or merged :param data: features of the data\"\"\"\n <|body_0|>\n\n def applySelection(self, selection, type):\n \"\"\"applies feature selection :param selection: feature selection technique :param type: Gender, Age, or Both :return: feature selected data\"\"\"\n <|body_1|>\n\n def applyExtraction(self, selection):\n \"\"\"applies feature selection :param selection: feature extraction technique :param type: Gender, Age, or Both :return: feature extracted data\"\"\"\n <|body_2|>\n\n def getFeatures(self, selection, mode):\n \"\"\"applies feature selection or extraction :param selection: feature selection or extraction technique :param mode: Gender, Age, or Both :return: feature selected or extracted data\"\"\"\n <|body_3|>\n\n def useLasso(self, mode):\n \"\"\"applies LASSO feature selection :param selection: feature selection or extraction technique :return: feature selected data\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X, self.y = (X, y)\n self.y['Gender'] = GenderWrap().fit_transform(self.y['Gender'])\n self.y['Age'] = AgeRangeWrap().fit_transform(self.y['Age'])\n if data is None:\n self.features = pd.read_csv('data/' + source + '/raw/features_fin.csv', encoding='ISO-8859-1', index_col=0)\n else:\n self.features = data\n<|end_body_0|>\n\n<|body_start_1|>\n sq = SelectionWrap(selection)\n if type == 'Both':\n df = sq.fit_transform(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n df = sq.fit_transform(self.features, self.y[type])\n print(df.shape)\n return df\n<|end_body_1|>\n\n<|body_start_2|>\n print(self.features.shape)\n sq = ExtractionWrap(selection)\n df = sq.fit_transform(self.features)\n print(df.shape)\n return df\n<|end_body_2|>\n\n<|body_start_3|>\n if type(selection) is TruncatedSVD:\n result = pd.concat([self.X, self.y, self.applyExtraction(selection)], axis=1)\n else:\n result = pd.concat([self.X, self.y, self.applySelection(selection, mode)], axis=1)\n return result\n<|end_body_3|>\n\n<|body_start_4|>\n if mode == 'Both':\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y[mode])\n model = SelectFromModel(lr, prefit=True)\n X_new = model.transform(self.features)\n orgcol = self.features.columns\n cols = []\n print(orgcol)\n print(model.get_support())\n for stat, labl in zip(model.get_support(), orgcol):\n print(stat, type(stat))\n if stat == True:\n cols.append(labl)\n print(X_new.shape)\n print(cols)\n result = pd.DataFrame(data=X_new, columns=cols)\n return pd.concat([self.X, self.y, result], axis=1)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000237", "length_bytes": 3915, "license_type": "no_license", "methods": [{"docstring": ":param X: text data :param y: classes (gender and age) :param source: twitter, facebook, or merged :param data: features of the data", "name": "__init__", "signature": "def __init__(self, X, y, source, data=None)"}, {"docstring": "applies feature selection :param selection: feature selection technique :param type: Gender, Age, or Both :return: feature selected data", "name": "applySelection", "signature": "def applySelection(self, selection, type)"}, {"docstring": "applies feature selection :param selection: feature extraction technique :param type: Gender, Age, or Both :return: feature extracted data", "name": "applyExtraction", "signature": "def applyExtraction(self, selection)"}, {"docstring": "applies feature selection or extraction :param selection: feature selection or extraction technique :param mode: Gender, Age, or Both :return: feature selected or extracted data", "name": "getFeatures", "signature": "def getFeatures(self, selection, mode)"}, {"docstring": "applies LASSO feature selection :param selection: feature selection or extraction technique :return: feature selected data", "name": "useLasso", "signature": "def useLasso(self, mode)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_007989", "prompt": "Implement the Python class `Feature` described below.\n\nClass description:\nApplies dimension reduction to data\n\nMethod signatures and docstrings:\n- def __init__(self, X, y, source, data=None): :param X: text data :param y: classes (gender and age) :param source: twitter, facebook, or merged :param data: features of the data\n- def applySelection(self, selection, type): applies feature selection :param selection: feature selection technique :param type: Gender, Age, or Both :return: feature selected data\n- def applyExtraction(self, selection): applies feature selection :param selection: feature extraction technique :param type: Gender, Age, or Both :return: feature extracted data\n- def getFeatures(self, selection, mode): applies feature selection or extraction :param selection: feature selection or extraction technique :param mode: Gender, Age, or Both :return: feature selected or extracted data\n- def useLasso(self, mode): applies LASSO feature selection :param selection: feature selection or extraction technique :return: feature selected data", "prompted_full_text": "Implement the Python class `Feature` described below.\n\nClass description:\nApplies dimension reduction to data\n\nMethod signatures and docstrings:\n- def __init__(self, X, y, source, data=None): :param X: text data :param y: classes (gender and age) :param source: twitter, facebook, or merged :param data: features of the data\n- def applySelection(self, selection, type): applies feature selection :param selection: feature selection technique :param type: Gender, Age, or Both :return: feature selected data\n- def applyExtraction(self, selection): applies feature selection :param selection: feature extraction technique :param type: Gender, Age, or Both :return: feature extracted data\n- def getFeatures(self, selection, mode): applies feature selection or extraction :param selection: feature selection or extraction technique :param mode: Gender, Age, or Both :return: feature selected or extracted data\n- def useLasso(self, mode): applies LASSO feature selection :param selection: feature selection or extraction technique :return: feature selected data\n\n<|skeleton|>\nclass Feature:\n \"\"\"Applies dimension reduction to data\"\"\"\n\n def __init__(self, X, y, source, data=None):\n \"\"\":param X: text data :param y: classes (gender and age) :param source: twitter, facebook, or merged :param data: features of the data\"\"\"\n <|body_0|>\n\n def applySelection(self, selection, type):\n \"\"\"applies feature selection :param selection: feature selection technique :param type: Gender, Age, or Both :return: feature selected data\"\"\"\n <|body_1|>\n\n def applyExtraction(self, selection):\n \"\"\"applies feature selection :param selection: feature extraction technique :param type: Gender, Age, or Both :return: feature extracted data\"\"\"\n <|body_2|>\n\n def getFeatures(self, selection, mode):\n \"\"\"applies feature selection or extraction :param selection: feature selection or extraction technique :param mode: Gender, Age, or Both :return: feature selected or extracted data\"\"\"\n <|body_3|>\n\n def useLasso(self, mode):\n \"\"\"applies LASSO feature selection :param selection: feature selection or extraction technique :return: feature selected data\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.X, self.y = (X, y)\n self.y['Gender'] = GenderWrap().fit_transform(self.y['Gender'])\n self.y['Age'] = AgeRangeWrap().fit_transform(self.y['Age'])\n if data is None:\n self.features = pd.read_csv('data/' + source + '/raw/features_fin.csv', encoding='ISO-8859-1', index_col=0)\n else:\n self.features = data\n<|end_body_0|>\n\n<|body_start_1|>\n sq = SelectionWrap(selection)\n if type == 'Both':\n df = sq.fit_transform(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n df = sq.fit_transform(self.features, self.y[type])\n print(df.shape)\n return df\n<|end_body_1|>\n\n<|body_start_2|>\n print(self.features.shape)\n sq = ExtractionWrap(selection)\n df = sq.fit_transform(self.features)\n print(df.shape)\n return df\n<|end_body_2|>\n\n<|body_start_3|>\n if type(selection) is TruncatedSVD:\n result = pd.concat([self.X, self.y, self.applyExtraction(selection)], axis=1)\n else:\n result = pd.concat([self.X, self.y, self.applySelection(selection, mode)], axis=1)\n return result\n<|end_body_3|>\n\n<|body_start_4|>\n if mode == 'Both':\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y[mode])\n model = SelectFromModel(lr, prefit=True)\n X_new = model.transform(self.features)\n orgcol = self.features.columns\n cols = []\n print(orgcol)\n print(model.get_support())\n for stat, labl in zip(model.get_support(), orgcol):\n print(stat, type(stat))\n if stat == True:\n cols.append(labl)\n print(X_new.shape)\n print(cols)\n result = pd.DataFrame(data=X_new, columns=cols)\n return pd.concat([self.X, self.y, result], axis=1)\n<|end_body_4|>\n", "revision_id": "47d66e062f60993f3bf6f59a64d6e19b7d178333", "skeleton": "<|skeleton|>\nclass Feature:\n \"\"\"Applies dimension reduction to data\"\"\"\n\n def __init__(self, X, y, source, data=None):\n \"\"\":param X: text data :param y: classes (gender and age) :param source: twitter, facebook, or merged :param data: features of the data\"\"\"\n <|body_0|>\n\n def applySelection(self, selection, type):\n \"\"\"applies feature selection :param selection: feature selection technique :param type: Gender, Age, or Both :return: feature selected data\"\"\"\n <|body_1|>\n\n def applyExtraction(self, selection):\n \"\"\"applies feature selection :param selection: feature extraction technique :param type: Gender, Age, or Both :return: feature extracted data\"\"\"\n <|body_2|>\n\n def getFeatures(self, selection, mode):\n \"\"\"applies feature selection or extraction :param selection: feature selection or extraction technique :param mode: Gender, Age, or Both :return: feature selected or extracted data\"\"\"\n <|body_3|>\n\n def useLasso(self, mode):\n \"\"\"applies LASSO feature selection :param selection: feature selection or extraction technique :return: feature selected data\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Feature:\n \"\"\"Applies dimension reduction to data\"\"\"\n\n def __init__(self, X, y, source, data=None):\n \"\"\":param X: text data :param y: classes (gender and age) :param source: twitter, facebook, or merged :param data: features of the data\"\"\"\n self.X, self.y = (X, y)\n self.y['Gender'] = GenderWrap().fit_transform(self.y['Gender'])\n self.y['Age'] = AgeRangeWrap().fit_transform(self.y['Age'])\n if data is None:\n self.features = pd.read_csv('data/' + source + '/raw/features_fin.csv', encoding='ISO-8859-1', index_col=0)\n else:\n self.features = data\n\n def applySelection(self, selection, type):\n \"\"\"applies feature selection :param selection: feature selection technique :param type: Gender, Age, or Both :return: feature selected data\"\"\"\n sq = SelectionWrap(selection)\n if type == 'Both':\n df = sq.fit_transform(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n df = sq.fit_transform(self.features, self.y[type])\n print(df.shape)\n return df\n\n def applyExtraction(self, selection):\n \"\"\"applies feature selection :param selection: feature extraction technique :param type: Gender, Age, or Both :return: feature extracted data\"\"\"\n print(self.features.shape)\n sq = ExtractionWrap(selection)\n df = sq.fit_transform(self.features)\n print(df.shape)\n return df\n\n def getFeatures(self, selection, mode):\n \"\"\"applies feature selection or extraction :param selection: feature selection or extraction technique :param mode: Gender, Age, or Both :return: feature selected or extracted data\"\"\"\n if type(selection) is TruncatedSVD:\n result = pd.concat([self.X, self.y, self.applyExtraction(selection)], axis=1)\n else:\n result = pd.concat([self.X, self.y, self.applySelection(selection, mode)], axis=1)\n return result\n\n def useLasso(self, mode):\n \"\"\"applies LASSO feature selection :param selection: feature selection or extraction technique :return: feature selected data\"\"\"\n if mode == 'Both':\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y['Gender'].map(str) + self.y['Age'].map(str))\n else:\n lr = LogisticRegression(penalty='l1', dual=False).fit(self.features, self.y[mode])\n model = SelectFromModel(lr, prefit=True)\n X_new = model.transform(self.features)\n orgcol = self.features.columns\n cols = []\n print(orgcol)\n print(model.get_support())\n for stat, labl in zip(model.get_support(), orgcol):\n print(stat, type(stat))\n if stat == True:\n cols.append(labl)\n print(X_new.shape)\n print(cols)\n result = pd.DataFrame(data=X_new, columns=cols)\n return pd.concat([self.X, self.y, result], axis=1)\n", "source": "the_stack_v2_python_sparse", "source_path": "features/Feature.py", "source_repo": "jankristoffercheng/thesis", "split": "test", "star_events_count": 0} {"blob_id": "cda8a95a0326422df64c483589df60577a80b951", "bodies": ["result = self.subject.pop3_result\n'判断结果'\nif not result:\n return\n'解析邮件内容'\nfor item in result:\n '解析退信内容 获取 sendResult'\n parseTextResult = self.parseText(item)\n self.logger.info('解析退信内容结果为:%s' % parseTextResult)\n '解析eml内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseEML(item)\n self.logger.info('解析eml内容结果为:%s' % parseEMLResult)\n if not parseEMLResult.get('mobile'):\n '解析HTML内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseHtml(item)\n self.logger.info('解析HTML内容结果为:%s' % parseEMLResult)\n '更新记录'\n item.update(parseTextResult)\n item.update(parseEMLResult)", "result = {'sendResult': item.get('sendSubject', ''), 'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\nhtml_path = item.get('resultHtml', '')\n'文件是否存在 或后缀名不正确'\nif not html_path or html_path.find('html') < 0:\n return result\n'读取内容'\nfile_info = BusinessUtil.read_file(html_path)\n'判断内容是否为空'\nif not file_info:\n return result\n'解析数据'\nparse_text_result = BusinessUtil.parse_eml_text_param(file_info)\n'更新'\nresult.update(parse_text_result)\nreturn result", "result = {'sendResult': ''}\ntext_path = item.get('resultText', '')\n'文件是否存在'\nif not text_path:\n return result\n'读取文件内容'\ntext_content = BusinessUtil.read_file(text_path)\n'解析'\nsendResult = BusinessUtil.search_send_result(text_content)\n'结果不为空'\nif sendResult:\n result['sendResult'] = sendResult[0].strip()\nreturn result", "result = {'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\neml_path = item.get('resultAttach', '')\n'文件是否存在 或后缀名不正确'\nif not eml_path or eml_path.find('eml') < 0:\n return result\n'解析邮件内容'\nparse_eml_result = BusinessUtil.parse_eml(eml_path)\n'获取发送账号'\nresult.update({'email': parse_eml_result.get('To')})\n'获取文件内容'\nparse_content = parse_eml_result.get('content')\n'解析数据'\nparse_text_result = BusinessUtil.parse_eml_text_param(parse_content)\n'更新'\nresult.update(parse_text_result)\nreturn result"], "bodies_text": "<|body_start_0|>\n result = self.subject.pop3_result\n '判断结果'\n if not result:\n return\n '解析邮件内容'\n for item in result:\n '解析退信内容 获取 sendResult'\n parseTextResult = self.parseText(item)\n self.logger.info('解析退信内容结果为:%s' % parseTextResult)\n '解析eml内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseEML(item)\n self.logger.info('解析eml内容结果为:%s' % parseEMLResult)\n if not parseEMLResult.get('mobile'):\n '解析HTML内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseHtml(item)\n self.logger.info('解析HTML内容结果为:%s' % parseEMLResult)\n '更新记录'\n item.update(parseTextResult)\n item.update(parseEMLResult)\n<|end_body_0|>\n\n<|body_start_1|>\n result = {'sendResult': item.get('sendSubject', ''), 'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n html_path = item.get('resultHtml', '')\n '文件是否存在 或后缀名不正确'\n if not html_path or html_path.find('html') < 0:\n return result\n '读取内容'\n file_info = BusinessUtil.read_file(html_path)\n '判断内容是否为空'\n if not file_info:\n return result\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(file_info)\n '更新'\n result.update(parse_text_result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n result = {'sendResult': ''}\n text_path = item.get('resultText', '')\n '文件是否存在'\n if not text_path:\n return result\n '读取文件内容'\n text_content = BusinessUtil.read_file(text_path)\n '解析'\n sendResult = BusinessUtil.search_send_result(text_content)\n '结果不为空'\n if sendResult:\n result['sendResult'] = sendResult[0].strip()\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n result = {'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n eml_path = item.get('resultAttach', '')\n '文件是否存在 或后缀名不正确'\n if not eml_path or eml_path.find('eml') < 0:\n return result\n '解析邮件内容'\n parse_eml_result = BusinessUtil.parse_eml(eml_path)\n '获取发送账号'\n result.update({'email': parse_eml_result.get('To')})\n '获取文件内容'\n parse_content = parse_eml_result.get('content')\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(parse_content)\n '更新'\n result.update(parse_text_result)\n return result\n<|end_body_3|>\n", "class_docstring": "解析邮件内容", "class_name": "ParseEmailObserver", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ParseEmailObserver:\n \"\"\"解析邮件内容\"\"\"\n\n def update(self):\n \"\"\"解析邮件内容 :return:\"\"\"\n <|body_0|>\n\n def parseHtml(self, item):\n \"\"\"解析退信内容 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n <|body_1|>\n\n def parseText(self, item):\n \"\"\"解析退信内容 获取sendResult :param item: :return:\"\"\"\n <|body_2|>\n\n def parseEML(self, item):\n \"\"\"解析eml后缀文件 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = self.subject.pop3_result\n '判断结果'\n if not result:\n return\n '解析邮件内容'\n for item in result:\n '解析退信内容 获取 sendResult'\n parseTextResult = self.parseText(item)\n self.logger.info('解析退信内容结果为:%s' % parseTextResult)\n '解析eml内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseEML(item)\n self.logger.info('解析eml内容结果为:%s' % parseEMLResult)\n if not parseEMLResult.get('mobile'):\n '解析HTML内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseHtml(item)\n self.logger.info('解析HTML内容结果为:%s' % parseEMLResult)\n '更新记录'\n item.update(parseTextResult)\n item.update(parseEMLResult)\n<|end_body_0|>\n\n<|body_start_1|>\n result = {'sendResult': item.get('sendSubject', ''), 'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n html_path = item.get('resultHtml', '')\n '文件是否存在 或后缀名不正确'\n if not html_path or html_path.find('html') < 0:\n return result\n '读取内容'\n file_info = BusinessUtil.read_file(html_path)\n '判断内容是否为空'\n if not file_info:\n return result\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(file_info)\n '更新'\n result.update(parse_text_result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n result = {'sendResult': ''}\n text_path = item.get('resultText', '')\n '文件是否存在'\n if not text_path:\n return result\n '读取文件内容'\n text_content = BusinessUtil.read_file(text_path)\n '解析'\n sendResult = BusinessUtil.search_send_result(text_content)\n '结果不为空'\n if sendResult:\n result['sendResult'] = sendResult[0].strip()\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n result = {'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n eml_path = item.get('resultAttach', '')\n '文件是否存在 或后缀名不正确'\n if not eml_path or eml_path.find('eml') < 0:\n return result\n '解析邮件内容'\n parse_eml_result = BusinessUtil.parse_eml(eml_path)\n '获取发送账号'\n result.update({'email': parse_eml_result.get('To')})\n '获取文件内容'\n parse_content = parse_eml_result.get('content')\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(parse_content)\n '更新'\n result.update(parse_text_result)\n return result\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000238", "length_bytes": 3154, "license_type": "no_license", "methods": [{"docstring": "解析邮件内容 :return:", "name": "update", "signature": "def update(self)"}, {"docstring": "解析退信内容 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:", "name": "parseHtml", "signature": "def parseHtml(self, item)"}, {"docstring": "解析退信内容 获取sendResult :param item: :return:", "name": "parseText", "signature": "def parseText(self, item)"}, {"docstring": "解析eml后缀文件 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:", "name": "parseEML", "signature": "def parseEML(self, item)"}], "n_methods": 4, "prompt": "Implement the Python class `ParseEmailObserver` described below.\n\nClass description:\n解析邮件内容\n\nMethod signatures and docstrings:\n- def update(self): 解析邮件内容 :return:\n- def parseHtml(self, item): 解析退信内容 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\n- def parseText(self, item): 解析退信内容 获取sendResult :param item: :return:\n- def parseEML(self, item): 解析eml后缀文件 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:", "prompted_full_text": "Implement the Python class `ParseEmailObserver` described below.\n\nClass description:\n解析邮件内容\n\nMethod signatures and docstrings:\n- def update(self): 解析邮件内容 :return:\n- def parseHtml(self, item): 解析退信内容 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\n- def parseText(self, item): 解析退信内容 获取sendResult :param item: :return:\n- def parseEML(self, item): 解析eml后缀文件 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\n\n<|skeleton|>\nclass ParseEmailObserver:\n \"\"\"解析邮件内容\"\"\"\n\n def update(self):\n \"\"\"解析邮件内容 :return:\"\"\"\n <|body_0|>\n\n def parseHtml(self, item):\n \"\"\"解析退信内容 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n <|body_1|>\n\n def parseText(self, item):\n \"\"\"解析退信内容 获取sendResult :param item: :return:\"\"\"\n <|body_2|>\n\n def parseEML(self, item):\n \"\"\"解析eml后缀文件 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = self.subject.pop3_result\n '判断结果'\n if not result:\n return\n '解析邮件内容'\n for item in result:\n '解析退信内容 获取 sendResult'\n parseTextResult = self.parseText(item)\n self.logger.info('解析退信内容结果为:%s' % parseTextResult)\n '解析eml内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseEML(item)\n self.logger.info('解析eml内容结果为:%s' % parseEMLResult)\n if not parseEMLResult.get('mobile'):\n '解析HTML内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseHtml(item)\n self.logger.info('解析HTML内容结果为:%s' % parseEMLResult)\n '更新记录'\n item.update(parseTextResult)\n item.update(parseEMLResult)\n<|end_body_0|>\n\n<|body_start_1|>\n result = {'sendResult': item.get('sendSubject', ''), 'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n html_path = item.get('resultHtml', '')\n '文件是否存在 或后缀名不正确'\n if not html_path or html_path.find('html') < 0:\n return result\n '读取内容'\n file_info = BusinessUtil.read_file(html_path)\n '判断内容是否为空'\n if not file_info:\n return result\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(file_info)\n '更新'\n result.update(parse_text_result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n result = {'sendResult': ''}\n text_path = item.get('resultText', '')\n '文件是否存在'\n if not text_path:\n return result\n '读取文件内容'\n text_content = BusinessUtil.read_file(text_path)\n '解析'\n sendResult = BusinessUtil.search_send_result(text_content)\n '结果不为空'\n if sendResult:\n result['sendResult'] = sendResult[0].strip()\n return result\n<|end_body_2|>\n\n<|body_start_3|>\n result = {'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n eml_path = item.get('resultAttach', '')\n '文件是否存在 或后缀名不正确'\n if not eml_path or eml_path.find('eml') < 0:\n return result\n '解析邮件内容'\n parse_eml_result = BusinessUtil.parse_eml(eml_path)\n '获取发送账号'\n result.update({'email': parse_eml_result.get('To')})\n '获取文件内容'\n parse_content = parse_eml_result.get('content')\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(parse_content)\n '更新'\n result.update(parse_text_result)\n return result\n<|end_body_3|>\n", "revision_id": "1d8d64b81d657f79d0bdd4903856512adf7375db", "skeleton": "<|skeleton|>\nclass ParseEmailObserver:\n \"\"\"解析邮件内容\"\"\"\n\n def update(self):\n \"\"\"解析邮件内容 :return:\"\"\"\n <|body_0|>\n\n def parseHtml(self, item):\n \"\"\"解析退信内容 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n <|body_1|>\n\n def parseText(self, item):\n \"\"\"解析退信内容 获取sendResult :param item: :return:\"\"\"\n <|body_2|>\n\n def parseEML(self, item):\n \"\"\"解析eml后缀文件 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ParseEmailObserver:\n \"\"\"解析邮件内容\"\"\"\n\n def update(self):\n \"\"\"解析邮件内容 :return:\"\"\"\n result = self.subject.pop3_result\n '判断结果'\n if not result:\n return\n '解析邮件内容'\n for item in result:\n '解析退信内容 获取 sendResult'\n parseTextResult = self.parseText(item)\n self.logger.info('解析退信内容结果为:%s' % parseTextResult)\n '解析eml内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseEML(item)\n self.logger.info('解析eml内容结果为:%s' % parseEMLResult)\n if not parseEMLResult.get('mobile'):\n '解析HTML内容 获取 mobile、batchCode、subBatchCode、templateCode'\n parseEMLResult = self.parseHtml(item)\n self.logger.info('解析HTML内容结果为:%s' % parseEMLResult)\n '更新记录'\n item.update(parseTextResult)\n item.update(parseEMLResult)\n\n def parseHtml(self, item):\n \"\"\"解析退信内容 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n result = {'sendResult': item.get('sendSubject', ''), 'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n html_path = item.get('resultHtml', '')\n '文件是否存在 或后缀名不正确'\n if not html_path or html_path.find('html') < 0:\n return result\n '读取内容'\n file_info = BusinessUtil.read_file(html_path)\n '判断内容是否为空'\n if not file_info:\n return result\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(file_info)\n '更新'\n result.update(parse_text_result)\n return result\n\n def parseText(self, item):\n \"\"\"解析退信内容 获取sendResult :param item: :return:\"\"\"\n result = {'sendResult': ''}\n text_path = item.get('resultText', '')\n '文件是否存在'\n if not text_path:\n return result\n '读取文件内容'\n text_content = BusinessUtil.read_file(text_path)\n '解析'\n sendResult = BusinessUtil.search_send_result(text_content)\n '结果不为空'\n if sendResult:\n result['sendResult'] = sendResult[0].strip()\n return result\n\n def parseEML(self, item):\n \"\"\"解析eml后缀文件 获取mobile、batchCode、subBatchCode、templateCode :param item: :return:\"\"\"\n result = {'mobile': '', 'batchCode': '', 'subBatchCode': '', 'templateCode': '', 'email': ''}\n eml_path = item.get('resultAttach', '')\n '文件是否存在 或后缀名不正确'\n if not eml_path or eml_path.find('eml') < 0:\n return result\n '解析邮件内容'\n parse_eml_result = BusinessUtil.parse_eml(eml_path)\n '获取发送账号'\n result.update({'email': parse_eml_result.get('To')})\n '获取文件内容'\n parse_content = parse_eml_result.get('content')\n '解析数据'\n parse_text_result = BusinessUtil.parse_eml_text_param(parse_content)\n '更新'\n result.update(parse_text_result)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "edm/observer/parse_email_observer.py", "source_repo": "sunweipeng/edm", "split": "test", "star_events_count": 0} {"blob_id": "579dc0ff7374e3c15307c95e501131d84b62ebae", "bodies": ["if not parse_node:\n raise TypeError('parse_node cannot be null.')\ntry:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\nexcept AttributeError:\n mapping_value = None\nif mapping_value and mapping_value.casefold() == '#microsoft.graph.richLongRunningOperation'.casefold():\n from .rich_long_running_operation import RichLongRunningOperation\n return RichLongRunningOperation()\nreturn LongRunningOperation()", "from .entity import Entity\nfrom .long_running_operation_status import LongRunningOperationStatus\nfrom .rich_long_running_operation import RichLongRunningOperation\nfrom .entity import Entity\nfrom .long_running_operation_status import LongRunningOperationStatus\nfrom .rich_long_running_operation import RichLongRunningOperation\nfields: Dict[str, Callable[[Any], None]] = {'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'lastActionDateTime': lambda n: setattr(self, 'last_action_date_time', n.get_datetime_value()), 'resourceLocation': lambda n: setattr(self, 'resource_location', n.get_str_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(LongRunningOperationStatus)), 'statusDetail': lambda n: setattr(self, 'status_detail', n.get_str_value())}\nsuper_fields = super().get_field_deserializers()\nfields.update(super_fields)\nreturn fields", "if not writer:\n raise TypeError('writer cannot be null.')\nsuper().serialize(writer)\nwriter.write_datetime_value('createdDateTime', self.created_date_time)\nwriter.write_datetime_value('lastActionDateTime', self.last_action_date_time)\nwriter.write_str_value('resourceLocation', self.resource_location)\nwriter.write_enum_value('status', self.status)\nwriter.write_str_value('statusDetail', self.status_detail)"], "bodies_text": "<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.richLongRunningOperation'.casefold():\n from .rich_long_running_operation import RichLongRunningOperation\n return RichLongRunningOperation()\n return LongRunningOperation()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n fields: Dict[str, Callable[[Any], None]] = {'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'lastActionDateTime': lambda n: setattr(self, 'last_action_date_time', n.get_datetime_value()), 'resourceLocation': lambda n: setattr(self, 'resource_location', n.get_str_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(LongRunningOperationStatus)), 'statusDetail': lambda n: setattr(self, 'status_detail', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_datetime_value('lastActionDateTime', self.last_action_date_time)\n writer.write_str_value('resourceLocation', self.resource_location)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('statusDetail', self.status_detail)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "LongRunningOperation", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LongRunningOperation:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> LongRunningOperation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: LongRunningOperation\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.richLongRunningOperation'.casefold():\n from .rich_long_running_operation import RichLongRunningOperation\n return RichLongRunningOperation()\n return LongRunningOperation()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n fields: Dict[str, Callable[[Any], None]] = {'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'lastActionDateTime': lambda n: setattr(self, 'last_action_date_time', n.get_datetime_value()), 'resourceLocation': lambda n: setattr(self, 'resource_location', n.get_str_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(LongRunningOperationStatus)), 'statusDetail': lambda n: setattr(self, 'status_detail', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_datetime_value('lastActionDateTime', self.last_action_date_time)\n writer.write_str_value('resourceLocation', self.resource_location)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('statusDetail', self.status_detail)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000239", "length_bytes": 4418, "license_type": "permissive", "methods": [{"docstring": "Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: LongRunningOperation", "name": "create_from_discriminator_value", "signature": "def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> LongRunningOperation"}, {"docstring": "The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]", "name": "get_field_deserializers", "signature": "def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]"}, {"docstring": "Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "name": "serialize", "signature": "def serialize(self, writer: SerializationWriter) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_007665", "prompt": "Implement the Python class `LongRunningOperation` described below.\n\nClass description:\nImplement the LongRunningOperation class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> LongRunningOperation: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: LongRunningOperation\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model", "prompted_full_text": "Implement the Python class `LongRunningOperation` described below.\n\nClass description:\nImplement the LongRunningOperation class.\n\nMethod signatures and docstrings:\n- def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> LongRunningOperation: Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: LongRunningOperation\n- def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]: The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\n- def serialize(self, writer: SerializationWriter) -> None: Serializes information the current object Args: writer: Serialization writer to use to serialize this model\n\n<|skeleton|>\nclass LongRunningOperation:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> LongRunningOperation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: LongRunningOperation\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.richLongRunningOperation'.casefold():\n from .rich_long_running_operation import RichLongRunningOperation\n return RichLongRunningOperation()\n return LongRunningOperation()\n<|end_body_0|>\n\n<|body_start_1|>\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n fields: Dict[str, Callable[[Any], None]] = {'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'lastActionDateTime': lambda n: setattr(self, 'last_action_date_time', n.get_datetime_value()), 'resourceLocation': lambda n: setattr(self, 'resource_location', n.get_str_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(LongRunningOperationStatus)), 'statusDetail': lambda n: setattr(self, 'status_detail', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n<|end_body_1|>\n\n<|body_start_2|>\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_datetime_value('lastActionDateTime', self.last_action_date_time)\n writer.write_str_value('resourceLocation', self.resource_location)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('statusDetail', self.status_detail)\n<|end_body_2|>\n", "revision_id": "27de7ccbe688d7614b2f6bde0fdbcda4bc5cc949", "skeleton": "<|skeleton|>\nclass LongRunningOperation:\n\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> LongRunningOperation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: LongRunningOperation\"\"\"\n <|body_0|>\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n <|body_1|>\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LongRunningOperation:\n def create_from_discriminator_value(parse_node: Optional[ParseNode]=None) -> LongRunningOperation:\n \"\"\"Creates a new instance of the appropriate class based on discriminator value Args: parse_node: The parse node to use to read the discriminator value and create the object Returns: LongRunningOperation\"\"\"\n if not parse_node:\n raise TypeError('parse_node cannot be null.')\n try:\n mapping_value = parse_node.get_child_node('@odata.type').get_str_value()\n except AttributeError:\n mapping_value = None\n if mapping_value and mapping_value.casefold() == '#microsoft.graph.richLongRunningOperation'.casefold():\n from .rich_long_running_operation import RichLongRunningOperation\n return RichLongRunningOperation()\n return LongRunningOperation()\n\n def get_field_deserializers(self) -> Dict[str, Callable[[ParseNode], None]]:\n \"\"\"The deserialization information for the current model Returns: Dict[str, Callable[[ParseNode], None]]\"\"\"\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n from .entity import Entity\n from .long_running_operation_status import LongRunningOperationStatus\n from .rich_long_running_operation import RichLongRunningOperation\n fields: Dict[str, Callable[[Any], None]] = {'createdDateTime': lambda n: setattr(self, 'created_date_time', n.get_datetime_value()), 'lastActionDateTime': lambda n: setattr(self, 'last_action_date_time', n.get_datetime_value()), 'resourceLocation': lambda n: setattr(self, 'resource_location', n.get_str_value()), 'status': lambda n: setattr(self, 'status', n.get_enum_value(LongRunningOperationStatus)), 'statusDetail': lambda n: setattr(self, 'status_detail', n.get_str_value())}\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields\n\n def serialize(self, writer: SerializationWriter) -> None:\n \"\"\"Serializes information the current object Args: writer: Serialization writer to use to serialize this model\"\"\"\n if not writer:\n raise TypeError('writer cannot be null.')\n super().serialize(writer)\n writer.write_datetime_value('createdDateTime', self.created_date_time)\n writer.write_datetime_value('lastActionDateTime', self.last_action_date_time)\n writer.write_str_value('resourceLocation', self.resource_location)\n writer.write_enum_value('status', self.status)\n writer.write_str_value('statusDetail', self.status_detail)\n", "source": "the_stack_v2_python_sparse", "source_path": "msgraph/generated/models/long_running_operation.py", "source_repo": "microsoftgraph/msgraph-sdk-python", "split": "test", "star_events_count": 135} {"blob_id": "b4b7c8691745c70658bcdd125367f36b08454d00", "bodies": ["super(OneShot, self).__init__(name=name, child=child)\nself.final_status: typing.Optional[common.Status] = None\nself.policy = policy", "if self.final_status:\n self.logger.debug('{}.update()[bouncing]'.format(self.__class__.__name__))\n return self.final_status\nreturn self.decorated.status", "if self.final_status:\n for node in behaviour.Behaviour.tick(self):\n yield node\nelse:\n for node in Decorator.tick(self):\n yield node", "if not self.final_status and new_status in self.policy.value:\n self.logger.debug('{}.terminate({})[oneshot completed]'.format(self.__class__.__name__, new_status))\n self.feedback_message = 'oneshot completed'\n self.final_status = new_status\nelse:\n self.logger.debug('{}.terminate({})'.format(self.__class__.__name__, new_status))"], "bodies_text": "<|body_start_0|>\n super(OneShot, self).__init__(name=name, child=child)\n self.final_status: typing.Optional[common.Status] = None\n self.policy = policy\n<|end_body_0|>\n\n<|body_start_1|>\n if self.final_status:\n self.logger.debug('{}.update()[bouncing]'.format(self.__class__.__name__))\n return self.final_status\n return self.decorated.status\n<|end_body_1|>\n\n<|body_start_2|>\n if self.final_status:\n for node in behaviour.Behaviour.tick(self):\n yield node\n else:\n for node in Decorator.tick(self):\n yield node\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.final_status and new_status in self.policy.value:\n self.logger.debug('{}.terminate({})[oneshot completed]'.format(self.__class__.__name__, new_status))\n self.feedback_message = 'oneshot completed'\n self.final_status = new_status\n else:\n self.logger.debug('{}.terminate({})'.format(self.__class__.__name__, new_status))\n<|end_body_3|>\n", "class_docstring": "A decorator that implements the oneshot pattern. This decorator ensures that the underlying child is ticked through to completion just once and while doing so, will return with the same status as it's child. Thereafter it will return with the final status of the underlying child. Completion status is determined by the policy given on construction. * With policy :data:`~py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION`, the oneshot will activate only when the underlying child returns :data:`~py_trees.common.Status.SUCCESS` (i.e. it permits retries). * With policy :data:`~py_trees.common.OneShotPolicy.ON_COMPLETION`, the oneshot will activate when the child returns :data:`~py_trees.commo", "class_name": "OneShot", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass OneShot:\n \"\"\"A decorator that implements the oneshot pattern. This decorator ensures that the underlying child is ticked through to completion just once and while doing so, will return with the same status as it's child. Thereafter it will return with the final status of the underlying child. Completion status is determined by the policy given on construction. * With policy :data:`~py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION`, the oneshot will activate only when the underlying child returns :data:`~py_trees.common.Status.SUCCESS` (i.e. it permits retries). * With policy :data:`~py_trees.common.OneShotPolicy.ON_COMPLETION`, the oneshot will activate when the child returns :data:`~py_trees.commo\"\"\"\n\n def __init__(self, name: str, child: behaviour.Behaviour, policy: common.OneShotPolicy):\n \"\"\"Init with the decorated child. Args: child: behaviour to shoot name: the decorator name policy: policy determining when the oneshot should activate\"\"\"\n <|body_0|>\n\n def update(self) -> common.Status:\n \"\"\"Bounce if the child has already successfully completed. Returns: the behaviour's new status :class:`~py_trees.common.Status`\"\"\"\n <|body_1|>\n\n def tick(self) -> typing.Iterator[behaviour.Behaviour]:\n \"\"\"Tick the child or bounce back with the original status if already completed. Yields: a reference to itself or a behaviour in it's child subtree\"\"\"\n <|body_2|>\n\n def terminate(self, new_status: common.Status) -> None:\n \"\"\"Prevent further entry if finishing with :data:`~py_trees.common.Status.SUCCESS`. This uses a flag to register that the behaviour has gone through to completion. In future ticks, it will block entry to the child and just return the original status result.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(OneShot, self).__init__(name=name, child=child)\n self.final_status: typing.Optional[common.Status] = None\n self.policy = policy\n<|end_body_0|>\n\n<|body_start_1|>\n if self.final_status:\n self.logger.debug('{}.update()[bouncing]'.format(self.__class__.__name__))\n return self.final_status\n return self.decorated.status\n<|end_body_1|>\n\n<|body_start_2|>\n if self.final_status:\n for node in behaviour.Behaviour.tick(self):\n yield node\n else:\n for node in Decorator.tick(self):\n yield node\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.final_status and new_status in self.policy.value:\n self.logger.debug('{}.terminate({})[oneshot completed]'.format(self.__class__.__name__, new_status))\n self.feedback_message = 'oneshot completed'\n self.final_status = new_status\n else:\n self.logger.debug('{}.terminate({})'.format(self.__class__.__name__, new_status))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000240", "length_bytes": 32098, "license_type": "permissive", "methods": [{"docstring": "Init with the decorated child. Args: child: behaviour to shoot name: the decorator name policy: policy determining when the oneshot should activate", "name": "__init__", "signature": "def __init__(self, name: str, child: behaviour.Behaviour, policy: common.OneShotPolicy)"}, {"docstring": "Bounce if the child has already successfully completed. Returns: the behaviour's new status :class:`~py_trees.common.Status`", "name": "update", "signature": "def update(self) -> common.Status"}, {"docstring": "Tick the child or bounce back with the original status if already completed. Yields: a reference to itself or a behaviour in it's child subtree", "name": "tick", "signature": "def tick(self) -> typing.Iterator[behaviour.Behaviour]"}, {"docstring": "Prevent further entry if finishing with :data:`~py_trees.common.Status.SUCCESS`. This uses a flag to register that the behaviour has gone through to completion. In future ticks, it will block entry to the child and just return the original status result.", "name": "terminate", "signature": "def terminate(self, new_status: common.Status) -> None"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_028534", "prompt": "Implement the Python class `OneShot` described below.\n\nClass description:\nA decorator that implements the oneshot pattern. This decorator ensures that the underlying child is ticked through to completion just once and while doing so, will return with the same status as it's child. Thereafter it will return with the final status of the underlying child. Completion status is determined by the policy given on construction. * With policy :data:`~py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION`, the oneshot will activate only when the underlying child returns :data:`~py_trees.common.Status.SUCCESS` (i.e. it permits retries). * With policy :data:`~py_trees.common.OneShotPolicy.ON_COMPLETION`, the oneshot will activate when the child returns :data:`~py_trees.commo\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, child: behaviour.Behaviour, policy: common.OneShotPolicy): Init with the decorated child. Args: child: behaviour to shoot name: the decorator name policy: policy determining when the oneshot should activate\n- def update(self) -> common.Status: Bounce if the child has already successfully completed. Returns: the behaviour's new status :class:`~py_trees.common.Status`\n- def tick(self) -> typing.Iterator[behaviour.Behaviour]: Tick the child or bounce back with the original status if already completed. Yields: a reference to itself or a behaviour in it's child subtree\n- def terminate(self, new_status: common.Status) -> None: Prevent further entry if finishing with :data:`~py_trees.common.Status.SUCCESS`. This uses a flag to register that the behaviour has gone through to completion. In future ticks, it will block entry to the child and just return the original status result.", "prompted_full_text": "Implement the Python class `OneShot` described below.\n\nClass description:\nA decorator that implements the oneshot pattern. This decorator ensures that the underlying child is ticked through to completion just once and while doing so, will return with the same status as it's child. Thereafter it will return with the final status of the underlying child. Completion status is determined by the policy given on construction. * With policy :data:`~py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION`, the oneshot will activate only when the underlying child returns :data:`~py_trees.common.Status.SUCCESS` (i.e. it permits retries). * With policy :data:`~py_trees.common.OneShotPolicy.ON_COMPLETION`, the oneshot will activate when the child returns :data:`~py_trees.commo\n\nMethod signatures and docstrings:\n- def __init__(self, name: str, child: behaviour.Behaviour, policy: common.OneShotPolicy): Init with the decorated child. Args: child: behaviour to shoot name: the decorator name policy: policy determining when the oneshot should activate\n- def update(self) -> common.Status: Bounce if the child has already successfully completed. Returns: the behaviour's new status :class:`~py_trees.common.Status`\n- def tick(self) -> typing.Iterator[behaviour.Behaviour]: Tick the child or bounce back with the original status if already completed. Yields: a reference to itself or a behaviour in it's child subtree\n- def terminate(self, new_status: common.Status) -> None: Prevent further entry if finishing with :data:`~py_trees.common.Status.SUCCESS`. This uses a flag to register that the behaviour has gone through to completion. In future ticks, it will block entry to the child and just return the original status result.\n\n<|skeleton|>\nclass OneShot:\n \"\"\"A decorator that implements the oneshot pattern. This decorator ensures that the underlying child is ticked through to completion just once and while doing so, will return with the same status as it's child. Thereafter it will return with the final status of the underlying child. Completion status is determined by the policy given on construction. * With policy :data:`~py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION`, the oneshot will activate only when the underlying child returns :data:`~py_trees.common.Status.SUCCESS` (i.e. it permits retries). * With policy :data:`~py_trees.common.OneShotPolicy.ON_COMPLETION`, the oneshot will activate when the child returns :data:`~py_trees.commo\"\"\"\n\n def __init__(self, name: str, child: behaviour.Behaviour, policy: common.OneShotPolicy):\n \"\"\"Init with the decorated child. Args: child: behaviour to shoot name: the decorator name policy: policy determining when the oneshot should activate\"\"\"\n <|body_0|>\n\n def update(self) -> common.Status:\n \"\"\"Bounce if the child has already successfully completed. Returns: the behaviour's new status :class:`~py_trees.common.Status`\"\"\"\n <|body_1|>\n\n def tick(self) -> typing.Iterator[behaviour.Behaviour]:\n \"\"\"Tick the child or bounce back with the original status if already completed. Yields: a reference to itself or a behaviour in it's child subtree\"\"\"\n <|body_2|>\n\n def terminate(self, new_status: common.Status) -> None:\n \"\"\"Prevent further entry if finishing with :data:`~py_trees.common.Status.SUCCESS`. This uses a flag to register that the behaviour has gone through to completion. In future ticks, it will block entry to the child and just return the original status result.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(OneShot, self).__init__(name=name, child=child)\n self.final_status: typing.Optional[common.Status] = None\n self.policy = policy\n<|end_body_0|>\n\n<|body_start_1|>\n if self.final_status:\n self.logger.debug('{}.update()[bouncing]'.format(self.__class__.__name__))\n return self.final_status\n return self.decorated.status\n<|end_body_1|>\n\n<|body_start_2|>\n if self.final_status:\n for node in behaviour.Behaviour.tick(self):\n yield node\n else:\n for node in Decorator.tick(self):\n yield node\n<|end_body_2|>\n\n<|body_start_3|>\n if not self.final_status and new_status in self.policy.value:\n self.logger.debug('{}.terminate({})[oneshot completed]'.format(self.__class__.__name__, new_status))\n self.feedback_message = 'oneshot completed'\n self.final_status = new_status\n else:\n self.logger.debug('{}.terminate({})'.format(self.__class__.__name__, new_status))\n<|end_body_3|>\n", "revision_id": "17fc0aeed83ec57b1494deac848324ff61e64232", "skeleton": "<|skeleton|>\nclass OneShot:\n \"\"\"A decorator that implements the oneshot pattern. This decorator ensures that the underlying child is ticked through to completion just once and while doing so, will return with the same status as it's child. Thereafter it will return with the final status of the underlying child. Completion status is determined by the policy given on construction. * With policy :data:`~py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION`, the oneshot will activate only when the underlying child returns :data:`~py_trees.common.Status.SUCCESS` (i.e. it permits retries). * With policy :data:`~py_trees.common.OneShotPolicy.ON_COMPLETION`, the oneshot will activate when the child returns :data:`~py_trees.commo\"\"\"\n\n def __init__(self, name: str, child: behaviour.Behaviour, policy: common.OneShotPolicy):\n \"\"\"Init with the decorated child. Args: child: behaviour to shoot name: the decorator name policy: policy determining when the oneshot should activate\"\"\"\n <|body_0|>\n\n def update(self) -> common.Status:\n \"\"\"Bounce if the child has already successfully completed. Returns: the behaviour's new status :class:`~py_trees.common.Status`\"\"\"\n <|body_1|>\n\n def tick(self) -> typing.Iterator[behaviour.Behaviour]:\n \"\"\"Tick the child or bounce back with the original status if already completed. Yields: a reference to itself or a behaviour in it's child subtree\"\"\"\n <|body_2|>\n\n def terminate(self, new_status: common.Status) -> None:\n \"\"\"Prevent further entry if finishing with :data:`~py_trees.common.Status.SUCCESS`. This uses a flag to register that the behaviour has gone through to completion. In future ticks, it will block entry to the child and just return the original status result.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class OneShot:\n \"\"\"A decorator that implements the oneshot pattern. This decorator ensures that the underlying child is ticked through to completion just once and while doing so, will return with the same status as it's child. Thereafter it will return with the final status of the underlying child. Completion status is determined by the policy given on construction. * With policy :data:`~py_trees.common.OneShotPolicy.ON_SUCCESSFUL_COMPLETION`, the oneshot will activate only when the underlying child returns :data:`~py_trees.common.Status.SUCCESS` (i.e. it permits retries). * With policy :data:`~py_trees.common.OneShotPolicy.ON_COMPLETION`, the oneshot will activate when the child returns :data:`~py_trees.commo\"\"\"\n\n def __init__(self, name: str, child: behaviour.Behaviour, policy: common.OneShotPolicy):\n \"\"\"Init with the decorated child. Args: child: behaviour to shoot name: the decorator name policy: policy determining when the oneshot should activate\"\"\"\n super(OneShot, self).__init__(name=name, child=child)\n self.final_status: typing.Optional[common.Status] = None\n self.policy = policy\n\n def update(self) -> common.Status:\n \"\"\"Bounce if the child has already successfully completed. Returns: the behaviour's new status :class:`~py_trees.common.Status`\"\"\"\n if self.final_status:\n self.logger.debug('{}.update()[bouncing]'.format(self.__class__.__name__))\n return self.final_status\n return self.decorated.status\n\n def tick(self) -> typing.Iterator[behaviour.Behaviour]:\n \"\"\"Tick the child or bounce back with the original status if already completed. Yields: a reference to itself or a behaviour in it's child subtree\"\"\"\n if self.final_status:\n for node in behaviour.Behaviour.tick(self):\n yield node\n else:\n for node in Decorator.tick(self):\n yield node\n\n def terminate(self, new_status: common.Status) -> None:\n \"\"\"Prevent further entry if finishing with :data:`~py_trees.common.Status.SUCCESS`. This uses a flag to register that the behaviour has gone through to completion. In future ticks, it will block entry to the child and just return the original status result.\"\"\"\n if not self.final_status and new_status in self.policy.value:\n self.logger.debug('{}.terminate({})[oneshot completed]'.format(self.__class__.__name__, new_status))\n self.feedback_message = 'oneshot completed'\n self.final_status = new_status\n else:\n self.logger.debug('{}.terminate({})'.format(self.__class__.__name__, new_status))\n", "source": "the_stack_v2_python_sparse", "source_path": "py_trees/decorators.py", "source_repo": "jstyrud/py_trees", "split": "test", "star_events_count": 0} {"blob_id": "94a91369623d8affc28d32f6a63ad54caa5c09eb", "bodies": ["self.main_path = main_path\nself.ch_list = ch_list\nself.feature_labels = []\nfor n in ch_list:\n self.feature_labels += [x.__name__ + '_' + str(n) for x in param_list]\nself.feature_labels += [x.__name__ for x in cross_ch_param_list]\nself.feature_labels = np.array(self.feature_labels)\nself.df = pd.read_csv(os.path.join(Path(main_path).parents[0], 'methods_table.csv'))\nself.df[['total', 'detected', 'detected_ratio', 'false_positives']] = 0\nself.thresh = np.where(np.array(self.df.columns.str.contains('Thresh')))[0]\nself.weights = np.where(np.array(self.df.columns.str.contains('Weight')))[0]\nself.enabled = np.where(np.array(self.df.columns.str.contains('Enabled')))[0]\nif self.df.columns.str.contains('Enabled').sum() != len(self.feature_labels):\n print('Error! Size of features from csv file does not match object feature length')\n return", "print('--------------------- START --------------------------')\nprint('Testing methods on :', self.main_path)\nself.save_folder = os.path.join(self.main_path, 'model_performance')\nif os.path.exists(self.save_folder) is False:\n os.mkdir(self.save_folder)\nfolders = [f.name for f in os.scandir(self.main_path) if f.is_dir()]\nfor i in range(len(folders)):\n print('Analyzing', folders[i], '...')\n self.folder_loop(folders[i])\nself.df['detected_ratio'] = self.df['detected'] / self.df['total']\nfile_name = os.path.join(self.save_folder, 'best_method_metrics.csv')\nself.df.to_csv(file_name, header=True, index=False)\nprint('Method metrics saved to:', file_name)\nprint('----------------------- END --------------------------')", "ver_path = os.path.join(self.main_path, folder_name, 'verified_predictions_pantelis')\nif os.path.exists(ver_path) == False:\n print('path not found, skipping:', os.path.join(self.main_path, folder_name), '.')\n return False\nfilelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path)))\nfilelist = [os.path.splitext(x)[0] for x in filelist]\nfor i in tqdm(range(0, len(filelist))):\n data, y_true = get_data(os.path.join(self.main_path, folder_name), filelist[i], ch_num=ch_list, inner_path={'data_path': 'filt_data', 'pred_path': 'verified_predictions_pantelis'}, load_y=True)\n x_data, labels = get_features_allch(data, param_list, cross_ch_param_list)\n x_data = StandardScaler().fit_transform(x_data)\n bounds_true = find_szr_idx(y_true, np.array([0, 1]))\n for ii in range(len(self.df)):\n thresh = np.mean(x_data) + np.array(self.df.loc[ii][self.thresh]) * np.std(x_data)\n y_pred_array = x_data > thresh\n w = np.array(self.df.loc[ii][self.weights])\n e = np.array(self.df.loc[ii][self.enabled])\n y_pred = y_pred_array * w * e\n y_pred = np.sum(y_pred, axis=1) / np.sum(w * e)\n y_pred = y_pred > 0.5\n bounds_pred = find_szr_idx(y_pred, np.array([0, 1]))\n detected = 0\n if bounds_pred.shape[0] > 0:\n bounds_pred = merge_close(bounds_pred, merge_margin=5)\n detected = match_szrs(bounds_true, bounds_pred, err_margin=10)\n self.df['total'][ii] += bounds_true.shape[0]\n self.df['detected'][ii] += detected\n self.df['false_positives'][ii] += bounds_pred.shape[0] - detected\nreturn True"], "bodies_text": "<|body_start_0|>\n self.main_path = main_path\n self.ch_list = ch_list\n self.feature_labels = []\n for n in ch_list:\n self.feature_labels += [x.__name__ + '_' + str(n) for x in param_list]\n self.feature_labels += [x.__name__ for x in cross_ch_param_list]\n self.feature_labels = np.array(self.feature_labels)\n self.df = pd.read_csv(os.path.join(Path(main_path).parents[0], 'methods_table.csv'))\n self.df[['total', 'detected', 'detected_ratio', 'false_positives']] = 0\n self.thresh = np.where(np.array(self.df.columns.str.contains('Thresh')))[0]\n self.weights = np.where(np.array(self.df.columns.str.contains('Weight')))[0]\n self.enabled = np.where(np.array(self.df.columns.str.contains('Enabled')))[0]\n if self.df.columns.str.contains('Enabled').sum() != len(self.feature_labels):\n print('Error! Size of features from csv file does not match object feature length')\n return\n<|end_body_0|>\n\n<|body_start_1|>\n print('--------------------- START --------------------------')\n print('Testing methods on :', self.main_path)\n self.save_folder = os.path.join(self.main_path, 'model_performance')\n if os.path.exists(self.save_folder) is False:\n os.mkdir(self.save_folder)\n folders = [f.name for f in os.scandir(self.main_path) if f.is_dir()]\n for i in range(len(folders)):\n print('Analyzing', folders[i], '...')\n self.folder_loop(folders[i])\n self.df['detected_ratio'] = self.df['detected'] / self.df['total']\n file_name = os.path.join(self.save_folder, 'best_method_metrics.csv')\n self.df.to_csv(file_name, header=True, index=False)\n print('Method metrics saved to:', file_name)\n print('----------------------- END --------------------------')\n<|end_body_1|>\n\n<|body_start_2|>\n ver_path = os.path.join(self.main_path, folder_name, 'verified_predictions_pantelis')\n if os.path.exists(ver_path) == False:\n print('path not found, skipping:', os.path.join(self.main_path, folder_name), '.')\n return False\n filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path)))\n filelist = [os.path.splitext(x)[0] for x in filelist]\n for i in tqdm(range(0, len(filelist))):\n data, y_true = get_data(os.path.join(self.main_path, folder_name), filelist[i], ch_num=ch_list, inner_path={'data_path': 'filt_data', 'pred_path': 'verified_predictions_pantelis'}, load_y=True)\n x_data, labels = get_features_allch(data, param_list, cross_ch_param_list)\n x_data = StandardScaler().fit_transform(x_data)\n bounds_true = find_szr_idx(y_true, np.array([0, 1]))\n for ii in range(len(self.df)):\n thresh = np.mean(x_data) + np.array(self.df.loc[ii][self.thresh]) * np.std(x_data)\n y_pred_array = x_data > thresh\n w = np.array(self.df.loc[ii][self.weights])\n e = np.array(self.df.loc[ii][self.enabled])\n y_pred = y_pred_array * w * e\n y_pred = np.sum(y_pred, axis=1) / np.sum(w * e)\n y_pred = y_pred > 0.5\n bounds_pred = find_szr_idx(y_pred, np.array([0, 1]))\n detected = 0\n if bounds_pred.shape[0] > 0:\n bounds_pred = merge_close(bounds_pred, merge_margin=5)\n detected = match_szrs(bounds_true, bounds_pred, err_margin=10)\n self.df['total'][ii] += bounds_true.shape[0]\n self.df['detected'][ii] += detected\n self.df['false_positives'][ii] += bounds_pred.shape[0] - detected\n return True\n<|end_body_2|>\n", "class_docstring": "MethodTest Tests different feature combinations for seizure prediction obtained from testing dataset", "class_name": "MethodTest", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MethodTest:\n \"\"\"MethodTest Tests different feature combinations for seizure prediction obtained from testing dataset\"\"\"\n\n def __init__(self, main_path):\n \"\"\"ThreshMetrics(main_path) Parameters ---------- input_path : Str, path to parent directory.\"\"\"\n <|body_0|>\n\n def multi_folder(self):\n \"\"\"multi_folder(self) Loop though folder paths get seizure metrics and save to csv Parameters ---------- main_path : Str, to parent dir\"\"\"\n <|body_1|>\n\n def folder_loop(self, folder_name):\n \"\"\"folder_loop(self, folder_name) Parameters ---------- folder_name : Str, folder name Returns ------- bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.main_path = main_path\n self.ch_list = ch_list\n self.feature_labels = []\n for n in ch_list:\n self.feature_labels += [x.__name__ + '_' + str(n) for x in param_list]\n self.feature_labels += [x.__name__ for x in cross_ch_param_list]\n self.feature_labels = np.array(self.feature_labels)\n self.df = pd.read_csv(os.path.join(Path(main_path).parents[0], 'methods_table.csv'))\n self.df[['total', 'detected', 'detected_ratio', 'false_positives']] = 0\n self.thresh = np.where(np.array(self.df.columns.str.contains('Thresh')))[0]\n self.weights = np.where(np.array(self.df.columns.str.contains('Weight')))[0]\n self.enabled = np.where(np.array(self.df.columns.str.contains('Enabled')))[0]\n if self.df.columns.str.contains('Enabled').sum() != len(self.feature_labels):\n print('Error! Size of features from csv file does not match object feature length')\n return\n<|end_body_0|>\n\n<|body_start_1|>\n print('--------------------- START --------------------------')\n print('Testing methods on :', self.main_path)\n self.save_folder = os.path.join(self.main_path, 'model_performance')\n if os.path.exists(self.save_folder) is False:\n os.mkdir(self.save_folder)\n folders = [f.name for f in os.scandir(self.main_path) if f.is_dir()]\n for i in range(len(folders)):\n print('Analyzing', folders[i], '...')\n self.folder_loop(folders[i])\n self.df['detected_ratio'] = self.df['detected'] / self.df['total']\n file_name = os.path.join(self.save_folder, 'best_method_metrics.csv')\n self.df.to_csv(file_name, header=True, index=False)\n print('Method metrics saved to:', file_name)\n print('----------------------- END --------------------------')\n<|end_body_1|>\n\n<|body_start_2|>\n ver_path = os.path.join(self.main_path, folder_name, 'verified_predictions_pantelis')\n if os.path.exists(ver_path) == False:\n print('path not found, skipping:', os.path.join(self.main_path, folder_name), '.')\n return False\n filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path)))\n filelist = [os.path.splitext(x)[0] for x in filelist]\n for i in tqdm(range(0, len(filelist))):\n data, y_true = get_data(os.path.join(self.main_path, folder_name), filelist[i], ch_num=ch_list, inner_path={'data_path': 'filt_data', 'pred_path': 'verified_predictions_pantelis'}, load_y=True)\n x_data, labels = get_features_allch(data, param_list, cross_ch_param_list)\n x_data = StandardScaler().fit_transform(x_data)\n bounds_true = find_szr_idx(y_true, np.array([0, 1]))\n for ii in range(len(self.df)):\n thresh = np.mean(x_data) + np.array(self.df.loc[ii][self.thresh]) * np.std(x_data)\n y_pred_array = x_data > thresh\n w = np.array(self.df.loc[ii][self.weights])\n e = np.array(self.df.loc[ii][self.enabled])\n y_pred = y_pred_array * w * e\n y_pred = np.sum(y_pred, axis=1) / np.sum(w * e)\n y_pred = y_pred > 0.5\n bounds_pred = find_szr_idx(y_pred, np.array([0, 1]))\n detected = 0\n if bounds_pred.shape[0] > 0:\n bounds_pred = merge_close(bounds_pred, merge_margin=5)\n detected = match_szrs(bounds_true, bounds_pred, err_margin=10)\n self.df['total'][ii] += bounds_true.shape[0]\n self.df['detected'][ii] += detected\n self.df['false_positives'][ii] += bounds_pred.shape[0] - detected\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000241", "length_bytes": 7872, "license_type": "permissive", "methods": [{"docstring": "ThreshMetrics(main_path) Parameters ---------- input_path : Str, path to parent directory.", "name": "__init__", "signature": "def __init__(self, main_path)"}, {"docstring": "multi_folder(self) Loop though folder paths get seizure metrics and save to csv Parameters ---------- main_path : Str, to parent dir", "name": "multi_folder", "signature": "def multi_folder(self)"}, {"docstring": "folder_loop(self, folder_name) Parameters ---------- folder_name : Str, folder name Returns ------- bool", "name": "folder_loop", "signature": "def folder_loop(self, folder_name)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_038890", "prompt": "Implement the Python class `MethodTest` described below.\n\nClass description:\nMethodTest Tests different feature combinations for seizure prediction obtained from testing dataset\n\nMethod signatures and docstrings:\n- def __init__(self, main_path): ThreshMetrics(main_path) Parameters ---------- input_path : Str, path to parent directory.\n- def multi_folder(self): multi_folder(self) Loop though folder paths get seizure metrics and save to csv Parameters ---------- main_path : Str, to parent dir\n- def folder_loop(self, folder_name): folder_loop(self, folder_name) Parameters ---------- folder_name : Str, folder name Returns ------- bool", "prompted_full_text": "Implement the Python class `MethodTest` described below.\n\nClass description:\nMethodTest Tests different feature combinations for seizure prediction obtained from testing dataset\n\nMethod signatures and docstrings:\n- def __init__(self, main_path): ThreshMetrics(main_path) Parameters ---------- input_path : Str, path to parent directory.\n- def multi_folder(self): multi_folder(self) Loop though folder paths get seizure metrics and save to csv Parameters ---------- main_path : Str, to parent dir\n- def folder_loop(self, folder_name): folder_loop(self, folder_name) Parameters ---------- folder_name : Str, folder name Returns ------- bool\n\n<|skeleton|>\nclass MethodTest:\n \"\"\"MethodTest Tests different feature combinations for seizure prediction obtained from testing dataset\"\"\"\n\n def __init__(self, main_path):\n \"\"\"ThreshMetrics(main_path) Parameters ---------- input_path : Str, path to parent directory.\"\"\"\n <|body_0|>\n\n def multi_folder(self):\n \"\"\"multi_folder(self) Loop though folder paths get seizure metrics and save to csv Parameters ---------- main_path : Str, to parent dir\"\"\"\n <|body_1|>\n\n def folder_loop(self, folder_name):\n \"\"\"folder_loop(self, folder_name) Parameters ---------- folder_name : Str, folder name Returns ------- bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.main_path = main_path\n self.ch_list = ch_list\n self.feature_labels = []\n for n in ch_list:\n self.feature_labels += [x.__name__ + '_' + str(n) for x in param_list]\n self.feature_labels += [x.__name__ for x in cross_ch_param_list]\n self.feature_labels = np.array(self.feature_labels)\n self.df = pd.read_csv(os.path.join(Path(main_path).parents[0], 'methods_table.csv'))\n self.df[['total', 'detected', 'detected_ratio', 'false_positives']] = 0\n self.thresh = np.where(np.array(self.df.columns.str.contains('Thresh')))[0]\n self.weights = np.where(np.array(self.df.columns.str.contains('Weight')))[0]\n self.enabled = np.where(np.array(self.df.columns.str.contains('Enabled')))[0]\n if self.df.columns.str.contains('Enabled').sum() != len(self.feature_labels):\n print('Error! Size of features from csv file does not match object feature length')\n return\n<|end_body_0|>\n\n<|body_start_1|>\n print('--------------------- START --------------------------')\n print('Testing methods on :', self.main_path)\n self.save_folder = os.path.join(self.main_path, 'model_performance')\n if os.path.exists(self.save_folder) is False:\n os.mkdir(self.save_folder)\n folders = [f.name for f in os.scandir(self.main_path) if f.is_dir()]\n for i in range(len(folders)):\n print('Analyzing', folders[i], '...')\n self.folder_loop(folders[i])\n self.df['detected_ratio'] = self.df['detected'] / self.df['total']\n file_name = os.path.join(self.save_folder, 'best_method_metrics.csv')\n self.df.to_csv(file_name, header=True, index=False)\n print('Method metrics saved to:', file_name)\n print('----------------------- END --------------------------')\n<|end_body_1|>\n\n<|body_start_2|>\n ver_path = os.path.join(self.main_path, folder_name, 'verified_predictions_pantelis')\n if os.path.exists(ver_path) == False:\n print('path not found, skipping:', os.path.join(self.main_path, folder_name), '.')\n return False\n filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path)))\n filelist = [os.path.splitext(x)[0] for x in filelist]\n for i in tqdm(range(0, len(filelist))):\n data, y_true = get_data(os.path.join(self.main_path, folder_name), filelist[i], ch_num=ch_list, inner_path={'data_path': 'filt_data', 'pred_path': 'verified_predictions_pantelis'}, load_y=True)\n x_data, labels = get_features_allch(data, param_list, cross_ch_param_list)\n x_data = StandardScaler().fit_transform(x_data)\n bounds_true = find_szr_idx(y_true, np.array([0, 1]))\n for ii in range(len(self.df)):\n thresh = np.mean(x_data) + np.array(self.df.loc[ii][self.thresh]) * np.std(x_data)\n y_pred_array = x_data > thresh\n w = np.array(self.df.loc[ii][self.weights])\n e = np.array(self.df.loc[ii][self.enabled])\n y_pred = y_pred_array * w * e\n y_pred = np.sum(y_pred, axis=1) / np.sum(w * e)\n y_pred = y_pred > 0.5\n bounds_pred = find_szr_idx(y_pred, np.array([0, 1]))\n detected = 0\n if bounds_pred.shape[0] > 0:\n bounds_pred = merge_close(bounds_pred, merge_margin=5)\n detected = match_szrs(bounds_true, bounds_pred, err_margin=10)\n self.df['total'][ii] += bounds_true.shape[0]\n self.df['detected'][ii] += detected\n self.df['false_positives'][ii] += bounds_pred.shape[0] - detected\n return True\n<|end_body_2|>\n", "revision_id": "fd238749a8b80af1bd0902f737bc9017c4e29756", "skeleton": "<|skeleton|>\nclass MethodTest:\n \"\"\"MethodTest Tests different feature combinations for seizure prediction obtained from testing dataset\"\"\"\n\n def __init__(self, main_path):\n \"\"\"ThreshMetrics(main_path) Parameters ---------- input_path : Str, path to parent directory.\"\"\"\n <|body_0|>\n\n def multi_folder(self):\n \"\"\"multi_folder(self) Loop though folder paths get seizure metrics and save to csv Parameters ---------- main_path : Str, to parent dir\"\"\"\n <|body_1|>\n\n def folder_loop(self, folder_name):\n \"\"\"folder_loop(self, folder_name) Parameters ---------- folder_name : Str, folder name Returns ------- bool\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MethodTest:\n \"\"\"MethodTest Tests different feature combinations for seizure prediction obtained from testing dataset\"\"\"\n\n def __init__(self, main_path):\n \"\"\"ThreshMetrics(main_path) Parameters ---------- input_path : Str, path to parent directory.\"\"\"\n self.main_path = main_path\n self.ch_list = ch_list\n self.feature_labels = []\n for n in ch_list:\n self.feature_labels += [x.__name__ + '_' + str(n) for x in param_list]\n self.feature_labels += [x.__name__ for x in cross_ch_param_list]\n self.feature_labels = np.array(self.feature_labels)\n self.df = pd.read_csv(os.path.join(Path(main_path).parents[0], 'methods_table.csv'))\n self.df[['total', 'detected', 'detected_ratio', 'false_positives']] = 0\n self.thresh = np.where(np.array(self.df.columns.str.contains('Thresh')))[0]\n self.weights = np.where(np.array(self.df.columns.str.contains('Weight')))[0]\n self.enabled = np.where(np.array(self.df.columns.str.contains('Enabled')))[0]\n if self.df.columns.str.contains('Enabled').sum() != len(self.feature_labels):\n print('Error! Size of features from csv file does not match object feature length')\n return\n\n def multi_folder(self):\n \"\"\"multi_folder(self) Loop though folder paths get seizure metrics and save to csv Parameters ---------- main_path : Str, to parent dir\"\"\"\n print('--------------------- START --------------------------')\n print('Testing methods on :', self.main_path)\n self.save_folder = os.path.join(self.main_path, 'model_performance')\n if os.path.exists(self.save_folder) is False:\n os.mkdir(self.save_folder)\n folders = [f.name for f in os.scandir(self.main_path) if f.is_dir()]\n for i in range(len(folders)):\n print('Analyzing', folders[i], '...')\n self.folder_loop(folders[i])\n self.df['detected_ratio'] = self.df['detected'] / self.df['total']\n file_name = os.path.join(self.save_folder, 'best_method_metrics.csv')\n self.df.to_csv(file_name, header=True, index=False)\n print('Method metrics saved to:', file_name)\n print('----------------------- END --------------------------')\n\n def folder_loop(self, folder_name):\n \"\"\"folder_loop(self, folder_name) Parameters ---------- folder_name : Str, folder name Returns ------- bool\"\"\"\n ver_path = os.path.join(self.main_path, folder_name, 'verified_predictions_pantelis')\n if os.path.exists(ver_path) == False:\n print('path not found, skipping:', os.path.join(self.main_path, folder_name), '.')\n return False\n filelist = list(filter(lambda k: '.csv' in k, os.listdir(ver_path)))\n filelist = [os.path.splitext(x)[0] for x in filelist]\n for i in tqdm(range(0, len(filelist))):\n data, y_true = get_data(os.path.join(self.main_path, folder_name), filelist[i], ch_num=ch_list, inner_path={'data_path': 'filt_data', 'pred_path': 'verified_predictions_pantelis'}, load_y=True)\n x_data, labels = get_features_allch(data, param_list, cross_ch_param_list)\n x_data = StandardScaler().fit_transform(x_data)\n bounds_true = find_szr_idx(y_true, np.array([0, 1]))\n for ii in range(len(self.df)):\n thresh = np.mean(x_data) + np.array(self.df.loc[ii][self.thresh]) * np.std(x_data)\n y_pred_array = x_data > thresh\n w = np.array(self.df.loc[ii][self.weights])\n e = np.array(self.df.loc[ii][self.enabled])\n y_pred = y_pred_array * w * e\n y_pred = np.sum(y_pred, axis=1) / np.sum(w * e)\n y_pred = y_pred > 0.5\n bounds_pred = find_szr_idx(y_pred, np.array([0, 1]))\n detected = 0\n if bounds_pred.shape[0] > 0:\n bounds_pred = merge_close(bounds_pred, merge_margin=5)\n detected = match_szrs(bounds_true, bounds_pred, err_margin=10)\n self.df['total'][ii] += bounds_true.shape[0]\n self.df['detected'][ii] += detected\n self.df['false_positives'][ii] += bounds_pred.shape[0] - detected\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "model_selection/find_best_models.py", "source_repo": "bhargavaganti/logic_seizedetect", "split": "test", "star_events_count": 0} {"blob_id": "8053dab16dd83f63ad75e414063bb279e3956ad0", "bodies": ["super().__init__()\ninputs_size = combination(num_fields, 2)\ninputs_size = inputs_size * embed_size * 2\nself.senet = SENETLayer(num_fields, senet_reduction, squared=False)\nself.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\nself.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\nself.deep = DNNLayer(inputs_size=inputs_size, output_size=deep_output_size, layer_sizes=deep_layer_sizes, dropout_p=deep_dropout_p, activation=deep_activation)", "emb_interaction = self.emb_bilinear(emb_inputs.rename(None))\nemb_interaction.names = ('B', 'N', 'E')\nsenet_emb = self.senet(emb_inputs.rename(None))\nsenet_interaction = self.senet_bilinear(senet_emb.rename(None))\nsenet_interaction.names = ('B', 'N', 'E')\noutputs = torch.cat([emb_interaction, senet_interaction], dim='N')\noutputs = outputs.flatten(('N', 'E'), 'O')\noutputs = self.deep(outputs.rename(None))\noutputs = outputs.rename(None)\nreturn outputs"], "bodies_text": "<|body_start_0|>\n super().__init__()\n inputs_size = combination(num_fields, 2)\n inputs_size = inputs_size * embed_size * 2\n self.senet = SENETLayer(num_fields, senet_reduction, squared=False)\n self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.deep = DNNLayer(inputs_size=inputs_size, output_size=deep_output_size, layer_sizes=deep_layer_sizes, dropout_p=deep_dropout_p, activation=deep_activation)\n<|end_body_0|>\n\n<|body_start_1|>\n emb_interaction = self.emb_bilinear(emb_inputs.rename(None))\n emb_interaction.names = ('B', 'N', 'E')\n senet_emb = self.senet(emb_inputs.rename(None))\n senet_interaction = self.senet_bilinear(senet_emb.rename(None))\n senet_interaction.names = ('B', 'N', 'E')\n outputs = torch.cat([emb_interaction, senet_interaction], dim='N')\n outputs = outputs.flatten(('N', 'E'), 'O')\n outputs = self.deep(outputs.rename(None))\n outputs = outputs.rename(None)\n return outputs\n<|end_body_1|>\n", "class_docstring": "Model class of Feature-Importance and Bilinear-Feature-Interaction Network (FiBiNet). Feature-Importance and Bilinear-Feature-Interaction Network was proposed by Tongwen Huang in Sina Weibo Inc. in 2019, which is: #. to implement a famous computer vision algorithm `SENET` on recommendation system. #. to apply bilinear calculation to calculate features interactions rather than using inner-product or hadamard product, where they were used in recommendation system always. :Reference: #. `Tongwen Huang et al, 2019. FibiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction `_.", "class_name": "FeatureImportanceAndBilinearFeatureInteractionNetwork", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FeatureImportanceAndBilinearFeatureInteractionNetwork:\n \"\"\"Model class of Feature-Importance and Bilinear-Feature-Interaction Network (FiBiNet). Feature-Importance and Bilinear-Feature-Interaction Network was proposed by Tongwen Huang in Sina Weibo Inc. in 2019, which is: #. to implement a famous computer vision algorithm `SENET` on recommendation system. #. to apply bilinear calculation to calculate features interactions rather than using inner-product or hadamard product, where they were used in recommendation system always. :Reference: #. `Tongwen Huang et al, 2019. FibiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction `_.\"\"\"\n\n def __init__(self, embed_size: int, num_fields: int, senet_reduction: int, deep_output_size: int, deep_layer_sizes: List[int], bilinear_type: Optional[str]='all', bilinear_bias: Optional[bool]=True, deep_dropout_p: Optional[List[float]]=None, deep_activation: Optional[nn.Module]=nn.ReLU()):\n \"\"\"Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork Args: embed_size (int): size of embedding tensor num_fields (int): number of inputs' fields senet_reduction (int): size of reduction in dense layer of senet. deep_output_size (int): output size of dense network deep_layer_sizes (List[int]): layer sizes of dense network bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to \"all\" bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None deep_activation (torch.nn.Module, optional): activation function of dens\"\"\"\n <|body_0|>\n\n def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward calculation of FeatureImportanceAndBilinearFeatureInteractionNetwork Args: emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors Returns: T, shape = (B, O), data_type = torch.float: output of FeatureImportanceAndBilinearFeatureInteractionNetwork\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n inputs_size = combination(num_fields, 2)\n inputs_size = inputs_size * embed_size * 2\n self.senet = SENETLayer(num_fields, senet_reduction, squared=False)\n self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.deep = DNNLayer(inputs_size=inputs_size, output_size=deep_output_size, layer_sizes=deep_layer_sizes, dropout_p=deep_dropout_p, activation=deep_activation)\n<|end_body_0|>\n\n<|body_start_1|>\n emb_interaction = self.emb_bilinear(emb_inputs.rename(None))\n emb_interaction.names = ('B', 'N', 'E')\n senet_emb = self.senet(emb_inputs.rename(None))\n senet_interaction = self.senet_bilinear(senet_emb.rename(None))\n senet_interaction.names = ('B', 'N', 'E')\n outputs = torch.cat([emb_interaction, senet_interaction], dim='N')\n outputs = outputs.flatten(('N', 'E'), 'O')\n outputs = self.deep(outputs.rename(None))\n outputs = outputs.rename(None)\n return outputs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000242", "length_bytes": 4997, "license_type": "permissive", "methods": [{"docstring": "Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork Args: embed_size (int): size of embedding tensor num_fields (int): number of inputs' fields senet_reduction (int): size of reduction in dense layer of senet. deep_output_size (int): output size of dense network deep_layer_sizes (List[int]): layer sizes of dense network bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to \"all\" bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None deep_activation (torch.nn.Module, optional): activation function of dens", "name": "__init__", "signature": "def __init__(self, embed_size: int, num_fields: int, senet_reduction: int, deep_output_size: int, deep_layer_sizes: List[int], bilinear_type: Optional[str]='all', bilinear_bias: Optional[bool]=True, deep_dropout_p: Optional[List[float]]=None, deep_activation: Optional[nn.Module]=nn.ReLU())"}, {"docstring": "Forward calculation of FeatureImportanceAndBilinearFeatureInteractionNetwork Args: emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors Returns: T, shape = (B, O), data_type = torch.float: output of FeatureImportanceAndBilinearFeatureInteractionNetwork", "name": "forward", "signature": "def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024890", "prompt": "Implement the Python class `FeatureImportanceAndBilinearFeatureInteractionNetwork` described below.\n\nClass description:\nModel class of Feature-Importance and Bilinear-Feature-Interaction Network (FiBiNet). Feature-Importance and Bilinear-Feature-Interaction Network was proposed by Tongwen Huang in Sina Weibo Inc. in 2019, which is: #. to implement a famous computer vision algorithm `SENET` on recommendation system. #. to apply bilinear calculation to calculate features interactions rather than using inner-product or hadamard product, where they were used in recommendation system always. :Reference: #. `Tongwen Huang et al, 2019. FibiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction `_.\n\nMethod signatures and docstrings:\n- def __init__(self, embed_size: int, num_fields: int, senet_reduction: int, deep_output_size: int, deep_layer_sizes: List[int], bilinear_type: Optional[str]='all', bilinear_bias: Optional[bool]=True, deep_dropout_p: Optional[List[float]]=None, deep_activation: Optional[nn.Module]=nn.ReLU()): Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork Args: embed_size (int): size of embedding tensor num_fields (int): number of inputs' fields senet_reduction (int): size of reduction in dense layer of senet. deep_output_size (int): output size of dense network deep_layer_sizes (List[int]): layer sizes of dense network bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to \"all\" bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None deep_activation (torch.nn.Module, optional): activation function of dens\n- def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor: Forward calculation of FeatureImportanceAndBilinearFeatureInteractionNetwork Args: emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors Returns: T, shape = (B, O), data_type = torch.float: output of FeatureImportanceAndBilinearFeatureInteractionNetwork", "prompted_full_text": "Implement the Python class `FeatureImportanceAndBilinearFeatureInteractionNetwork` described below.\n\nClass description:\nModel class of Feature-Importance and Bilinear-Feature-Interaction Network (FiBiNet). Feature-Importance and Bilinear-Feature-Interaction Network was proposed by Tongwen Huang in Sina Weibo Inc. in 2019, which is: #. to implement a famous computer vision algorithm `SENET` on recommendation system. #. to apply bilinear calculation to calculate features interactions rather than using inner-product or hadamard product, where they were used in recommendation system always. :Reference: #. `Tongwen Huang et al, 2019. FibiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction `_.\n\nMethod signatures and docstrings:\n- def __init__(self, embed_size: int, num_fields: int, senet_reduction: int, deep_output_size: int, deep_layer_sizes: List[int], bilinear_type: Optional[str]='all', bilinear_bias: Optional[bool]=True, deep_dropout_p: Optional[List[float]]=None, deep_activation: Optional[nn.Module]=nn.ReLU()): Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork Args: embed_size (int): size of embedding tensor num_fields (int): number of inputs' fields senet_reduction (int): size of reduction in dense layer of senet. deep_output_size (int): output size of dense network deep_layer_sizes (List[int]): layer sizes of dense network bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to \"all\" bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None deep_activation (torch.nn.Module, optional): activation function of dens\n- def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor: Forward calculation of FeatureImportanceAndBilinearFeatureInteractionNetwork Args: emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors Returns: T, shape = (B, O), data_type = torch.float: output of FeatureImportanceAndBilinearFeatureInteractionNetwork\n\n<|skeleton|>\nclass FeatureImportanceAndBilinearFeatureInteractionNetwork:\n \"\"\"Model class of Feature-Importance and Bilinear-Feature-Interaction Network (FiBiNet). Feature-Importance and Bilinear-Feature-Interaction Network was proposed by Tongwen Huang in Sina Weibo Inc. in 2019, which is: #. to implement a famous computer vision algorithm `SENET` on recommendation system. #. to apply bilinear calculation to calculate features interactions rather than using inner-product or hadamard product, where they were used in recommendation system always. :Reference: #. `Tongwen Huang et al, 2019. FibiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction `_.\"\"\"\n\n def __init__(self, embed_size: int, num_fields: int, senet_reduction: int, deep_output_size: int, deep_layer_sizes: List[int], bilinear_type: Optional[str]='all', bilinear_bias: Optional[bool]=True, deep_dropout_p: Optional[List[float]]=None, deep_activation: Optional[nn.Module]=nn.ReLU()):\n \"\"\"Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork Args: embed_size (int): size of embedding tensor num_fields (int): number of inputs' fields senet_reduction (int): size of reduction in dense layer of senet. deep_output_size (int): output size of dense network deep_layer_sizes (List[int]): layer sizes of dense network bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to \"all\" bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None deep_activation (torch.nn.Module, optional): activation function of dens\"\"\"\n <|body_0|>\n\n def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward calculation of FeatureImportanceAndBilinearFeatureInteractionNetwork Args: emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors Returns: T, shape = (B, O), data_type = torch.float: output of FeatureImportanceAndBilinearFeatureInteractionNetwork\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n inputs_size = combination(num_fields, 2)\n inputs_size = inputs_size * embed_size * 2\n self.senet = SENETLayer(num_fields, senet_reduction, squared=False)\n self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.deep = DNNLayer(inputs_size=inputs_size, output_size=deep_output_size, layer_sizes=deep_layer_sizes, dropout_p=deep_dropout_p, activation=deep_activation)\n<|end_body_0|>\n\n<|body_start_1|>\n emb_interaction = self.emb_bilinear(emb_inputs.rename(None))\n emb_interaction.names = ('B', 'N', 'E')\n senet_emb = self.senet(emb_inputs.rename(None))\n senet_interaction = self.senet_bilinear(senet_emb.rename(None))\n senet_interaction.names = ('B', 'N', 'E')\n outputs = torch.cat([emb_interaction, senet_interaction], dim='N')\n outputs = outputs.flatten(('N', 'E'), 'O')\n outputs = self.deep(outputs.rename(None))\n outputs = outputs.rename(None)\n return outputs\n<|end_body_1|>\n", "revision_id": "751a43b9cd35e951d81c0d9cf46507b1777bb7ff", "skeleton": "<|skeleton|>\nclass FeatureImportanceAndBilinearFeatureInteractionNetwork:\n \"\"\"Model class of Feature-Importance and Bilinear-Feature-Interaction Network (FiBiNet). Feature-Importance and Bilinear-Feature-Interaction Network was proposed by Tongwen Huang in Sina Weibo Inc. in 2019, which is: #. to implement a famous computer vision algorithm `SENET` on recommendation system. #. to apply bilinear calculation to calculate features interactions rather than using inner-product or hadamard product, where they were used in recommendation system always. :Reference: #. `Tongwen Huang et al, 2019. FibiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction `_.\"\"\"\n\n def __init__(self, embed_size: int, num_fields: int, senet_reduction: int, deep_output_size: int, deep_layer_sizes: List[int], bilinear_type: Optional[str]='all', bilinear_bias: Optional[bool]=True, deep_dropout_p: Optional[List[float]]=None, deep_activation: Optional[nn.Module]=nn.ReLU()):\n \"\"\"Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork Args: embed_size (int): size of embedding tensor num_fields (int): number of inputs' fields senet_reduction (int): size of reduction in dense layer of senet. deep_output_size (int): output size of dense network deep_layer_sizes (List[int]): layer sizes of dense network bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to \"all\" bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None deep_activation (torch.nn.Module, optional): activation function of dens\"\"\"\n <|body_0|>\n\n def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward calculation of FeatureImportanceAndBilinearFeatureInteractionNetwork Args: emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors Returns: T, shape = (B, O), data_type = torch.float: output of FeatureImportanceAndBilinearFeatureInteractionNetwork\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FeatureImportanceAndBilinearFeatureInteractionNetwork:\n \"\"\"Model class of Feature-Importance and Bilinear-Feature-Interaction Network (FiBiNet). Feature-Importance and Bilinear-Feature-Interaction Network was proposed by Tongwen Huang in Sina Weibo Inc. in 2019, which is: #. to implement a famous computer vision algorithm `SENET` on recommendation system. #. to apply bilinear calculation to calculate features interactions rather than using inner-product or hadamard product, where they were used in recommendation system always. :Reference: #. `Tongwen Huang et al, 2019. FibiNET: Combining Feature Importance and Bilinear feature Interaction for Click-Through Rate Prediction `_.\"\"\"\n\n def __init__(self, embed_size: int, num_fields: int, senet_reduction: int, deep_output_size: int, deep_layer_sizes: List[int], bilinear_type: Optional[str]='all', bilinear_bias: Optional[bool]=True, deep_dropout_p: Optional[List[float]]=None, deep_activation: Optional[nn.Module]=nn.ReLU()):\n \"\"\"Initialize FeatureImportanceAndBilinearFeatureInteractionNetwork Args: embed_size (int): size of embedding tensor num_fields (int): number of inputs' fields senet_reduction (int): size of reduction in dense layer of senet. deep_output_size (int): output size of dense network deep_layer_sizes (List[int]): layer sizes of dense network bilinear_type (str, optional): type of bilinear to calculate interactions. Defaults to \"all\" bilinear_bias (bool, optional): flag to control using bias in bilinear-interactions. Defaults to True deep_dropout_p (List[float], optional): probability of Dropout in dense network. Defaults to None deep_activation (torch.nn.Module, optional): activation function of dens\"\"\"\n super().__init__()\n inputs_size = combination(num_fields, 2)\n inputs_size = inputs_size * embed_size * 2\n self.senet = SENETLayer(num_fields, senet_reduction, squared=False)\n self.emb_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.senet_bilinear = BilinearInteractionLayer(embed_size, num_fields, bilinear_type, bilinear_bias)\n self.deep = DNNLayer(inputs_size=inputs_size, output_size=deep_output_size, layer_sizes=deep_layer_sizes, dropout_p=deep_dropout_p, activation=deep_activation)\n\n def forward(self, emb_inputs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward calculation of FeatureImportanceAndBilinearFeatureInteractionNetwork Args: emb_inputs (T), shape = (B, N, E), data_type = torch.float: embedded features tensors Returns: T, shape = (B, O), data_type = torch.float: output of FeatureImportanceAndBilinearFeatureInteractionNetwork\"\"\"\n emb_interaction = self.emb_bilinear(emb_inputs.rename(None))\n emb_interaction.names = ('B', 'N', 'E')\n senet_emb = self.senet(emb_inputs.rename(None))\n senet_interaction = self.senet_bilinear(senet_emb.rename(None))\n senet_interaction.names = ('B', 'N', 'E')\n outputs = torch.cat([emb_interaction, senet_interaction], dim='N')\n outputs = outputs.flatten(('N', 'E'), 'O')\n outputs = self.deep(outputs.rename(None))\n outputs = outputs.rename(None)\n return outputs\n", "source": "the_stack_v2_python_sparse", "source_path": "torecsys/models/ctr/feature_importance_and_bilinear_feature_interaction_network.py", "source_repo": "p768lwy3/torecsys", "split": "test", "star_events_count": 98} {"blob_id": "a2d6aa03aa41ae5fa842219ae37a6dabdfddf0c9", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "class_docstring": "Missing associated documentation comment in .proto file.", "class_name": "UserFavServicer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserFavServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def GetFavList(self, request, context):\n \"\"\"过滤收藏信息\"\"\"\n <|body_0|>\n\n def AddUserFav(self, request, context):\n \"\"\"添加留言\"\"\"\n <|body_1|>\n\n def DeleteUserFav(self, request, context):\n \"\"\"删除留言\"\"\"\n <|body_2|>\n\n def GetUserFavDetail(self, request, context):\n \"\"\"删除留言\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000243", "length_bytes": 6939, "license_type": "no_license", "methods": [{"docstring": "过滤收藏信息", "name": "GetFavList", "signature": "def GetFavList(self, request, context)"}, {"docstring": "添加留言", "name": "AddUserFav", "signature": "def AddUserFav(self, request, context)"}, {"docstring": "删除留言", "name": "DeleteUserFav", "signature": "def DeleteUserFav(self, request, context)"}, {"docstring": "删除留言", "name": "GetUserFavDetail", "signature": "def GetUserFavDetail(self, request, context)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_020289", "prompt": "Implement the Python class `UserFavServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def GetFavList(self, request, context): 过滤收藏信息\n- def AddUserFav(self, request, context): 添加留言\n- def DeleteUserFav(self, request, context): 删除留言\n- def GetUserFavDetail(self, request, context): 删除留言", "prompted_full_text": "Implement the Python class `UserFavServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def GetFavList(self, request, context): 过滤收藏信息\n- def AddUserFav(self, request, context): 添加留言\n- def DeleteUserFav(self, request, context): 删除留言\n- def GetUserFavDetail(self, request, context): 删除留言\n\n<|skeleton|>\nclass UserFavServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def GetFavList(self, request, context):\n \"\"\"过滤收藏信息\"\"\"\n <|body_0|>\n\n def AddUserFav(self, request, context):\n \"\"\"添加留言\"\"\"\n <|body_1|>\n\n def DeleteUserFav(self, request, context):\n \"\"\"删除留言\"\"\"\n <|body_2|>\n\n def GetUserFavDetail(self, request, context):\n \"\"\"删除留言\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "revision_id": "e01c8fc9d9e734578733816669c122dede02489e", "skeleton": "<|skeleton|>\nclass UserFavServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def GetFavList(self, request, context):\n \"\"\"过滤收藏信息\"\"\"\n <|body_0|>\n\n def AddUserFav(self, request, context):\n \"\"\"添加留言\"\"\"\n <|body_1|>\n\n def DeleteUserFav(self, request, context):\n \"\"\"删除留言\"\"\"\n <|body_2|>\n\n def GetUserFavDetail(self, request, context):\n \"\"\"删除留言\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UserFavServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def GetFavList(self, request, context):\n \"\"\"过滤收藏信息\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def AddUserFav(self, request, context):\n \"\"\"添加留言\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteUserFav(self, request, context):\n \"\"\"删除留言\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetUserFavDetail(self, request, context):\n \"\"\"删除留言\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "other/mxshop_srvs/userop_srv/proto/userfav_pb2_grpc.py", "source_repo": "xqmmy/go-fresh", "split": "test", "star_events_count": 1} {"blob_id": "d9acaf1c2888867eb23218d60d2e3271a6f9c55a", "bodies": ["if not root:\n return '[]'\nif root.children == []:\n return '[{}]'.format(root.val)\nresult = '[{}'.format(root.val)\nfor child in root.children:\n result += self.serialize(child)\nreturn result + ']'", "if data == '[]' or ']':\n return None\ni = 1\nj = 1\nwhile data[j] != '[':\n j += 1\nroot = Node(int(data[i:j]), [])\ni = j\nwhile i < len(data) - 1 and j < len(data) - 1:\n left = 1\n while left:\n if data[j] == ']':\n left -= 1\n elif data[j] == '[':\n left += 1\n root.children.append(self.deserialize(data[i:j]))\n j += 1\n i = j\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return '[]'\n if root.children == []:\n return '[{}]'.format(root.val)\n result = '[{}'.format(root.val)\n for child in root.children:\n result += self.serialize(child)\n return result + ']'\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '[]' or ']':\n return None\n i = 1\n j = 1\n while data[j] != '[':\n j += 1\n root = Node(int(data[i:j]), [])\n i = j\n while i < len(data) - 1 and j < len(data) - 1:\n left = 1\n while left:\n if data[j] == ']':\n left -= 1\n elif data[j] == '[':\n left += 1\n root.children.append(self.deserialize(data[i:j]))\n j += 1\n i = j\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return '[]'\n if root.children == []:\n return '[{}]'.format(root.val)\n result = '[{}'.format(root.val)\n for child in root.children:\n result += self.serialize(child)\n return result + ']'\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '[]' or ']':\n return None\n i = 1\n j = 1\n while data[j] != '[':\n j += 1\n root = Node(int(data[i:j]), [])\n i = j\n while i < len(data) - 1 and j < len(data) - 1:\n left = 1\n while left:\n if data[j] == ']':\n left -= 1\n elif data[j] == '[':\n left += 1\n root.children.append(self.deserialize(data[i:j]))\n j += 1\n i = j\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000244", "length_bytes": 2159, "license_type": "permissive", "methods": [{"docstring": "Encodes a tree to a single string. :type root: Node :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: Node", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: Node :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: Node\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return '[]'\n if root.children == []:\n return '[{}]'.format(root.val)\n result = '[{}'.format(root.val)\n for child in root.children:\n result += self.serialize(child)\n return result + ']'\n<|end_body_0|>\n\n<|body_start_1|>\n if data == '[]' or ']':\n return None\n i = 1\n j = 1\n while data[j] != '[':\n j += 1\n root = Node(int(data[i:j]), [])\n i = j\n while i < len(data) - 1 and j < len(data) - 1:\n left = 1\n while left:\n if data[j] == ']':\n left -= 1\n elif data[j] == '[':\n left += 1\n root.children.append(self.deserialize(data[i:j]))\n j += 1\n i = j\n return root\n<|end_body_1|>\n", "revision_id": "817916a62774145fe6387b715f76c5badbf99197", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: Node :rtype: str\"\"\"\n if not root:\n return '[]'\n if root.children == []:\n return '[{}]'.format(root.val)\n result = '[{}'.format(root.val)\n for child in root.children:\n result += self.serialize(child)\n return result + ']'\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: Node\"\"\"\n if data == '[]' or ']':\n return None\n i = 1\n j = 1\n while data[j] != '[':\n j += 1\n root = Node(int(data[i:j]), [])\n i = j\n while i < len(data) - 1 and j < len(data) - 1:\n left = 1\n while left:\n if data[j] == ']':\n left -= 1\n elif data[j] == '[':\n left += 1\n root.children.append(self.deserialize(data[i:j]))\n j += 1\n i = j\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/428.py", "source_repo": "zi-NaN/algorithm_exercise", "split": "test", "star_events_count": 0} {"blob_id": "8267d34ec9849d005b0a7273a3e6b4a48ba46193", "bodies": ["mail = self.cleaned_data['mail']\nvalidate_mail = re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\\\.[a-zA-Z0-9-]+)*$\", mail)\nif not validate_mail:\n raise forms.ValidationError('El email debe ser valido')\nreturn mail", "full_name = self.cleaned_data['full_name']\nif len(full_name) < 5:\n raise forms.ValidationError('Ingrese un nombre más largo')\nreturn full_name", "password = self.cleaned_data['password']\nif len(password) < 8:\n raise forms.ValidationError('La contraseña debe tener un largo minimo de 8')\nreturn password"], "bodies_text": "<|body_start_0|>\n mail = self.cleaned_data['mail']\n validate_mail = re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\\\.[a-zA-Z0-9-]+)*$\", mail)\n if not validate_mail:\n raise forms.ValidationError('El email debe ser valido')\n return mail\n<|end_body_0|>\n\n<|body_start_1|>\n full_name = self.cleaned_data['full_name']\n if len(full_name) < 5:\n raise forms.ValidationError('Ingrese un nombre más largo')\n return full_name\n<|end_body_1|>\n\n<|body_start_2|>\n password = self.cleaned_data['password']\n if len(password) < 8:\n raise forms.ValidationError('La contraseña debe tener un largo minimo de 8')\n return password\n<|end_body_2|>\n", "class_docstring": "Base form class for User. the fields are customized: * full_name : configure a textInput and add placeholder * mail : configure a EmailInput and add placeholder * password : configure PasswordInput and add placeholder * profile : configure Select and add placeholder", "class_name": "BaseUserForm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseUserForm:\n \"\"\"Base form class for User. the fields are customized: * full_name : configure a textInput and add placeholder * mail : configure a EmailInput and add placeholder * password : configure PasswordInput and add placeholder * profile : configure Select and add placeholder\"\"\"\n\n def clean_mail(self):\n \"\"\"mail is validated through a regular expression. if not matched returns error\"\"\"\n <|body_0|>\n\n def clean_full_name(self):\n \"\"\"the number of characters of full_name is validated. if not matched returns error\"\"\"\n <|body_1|>\n\n def clean_password(self):\n \"\"\"the number of characters of password is validated. if not matched returns error\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mail = self.cleaned_data['mail']\n validate_mail = re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\\\.[a-zA-Z0-9-]+)*$\", mail)\n if not validate_mail:\n raise forms.ValidationError('El email debe ser valido')\n return mail\n<|end_body_0|>\n\n<|body_start_1|>\n full_name = self.cleaned_data['full_name']\n if len(full_name) < 5:\n raise forms.ValidationError('Ingrese un nombre más largo')\n return full_name\n<|end_body_1|>\n\n<|body_start_2|>\n password = self.cleaned_data['password']\n if len(password) < 8:\n raise forms.ValidationError('La contraseña debe tener un largo minimo de 8')\n return password\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000245", "length_bytes": 2463, "license_type": "no_license", "methods": [{"docstring": "mail is validated through a regular expression. if not matched returns error", "name": "clean_mail", "signature": "def clean_mail(self)"}, {"docstring": "the number of characters of full_name is validated. if not matched returns error", "name": "clean_full_name", "signature": "def clean_full_name(self)"}, {"docstring": "the number of characters of password is validated. if not matched returns error", "name": "clean_password", "signature": "def clean_password(self)"}], "n_methods": 3, "prompt": "Implement the Python class `BaseUserForm` described below.\n\nClass description:\nBase form class for User. the fields are customized: * full_name : configure a textInput and add placeholder * mail : configure a EmailInput and add placeholder * password : configure PasswordInput and add placeholder * profile : configure Select and add placeholder\n\nMethod signatures and docstrings:\n- def clean_mail(self): mail is validated through a regular expression. if not matched returns error\n- def clean_full_name(self): the number of characters of full_name is validated. if not matched returns error\n- def clean_password(self): the number of characters of password is validated. if not matched returns error", "prompted_full_text": "Implement the Python class `BaseUserForm` described below.\n\nClass description:\nBase form class for User. the fields are customized: * full_name : configure a textInput and add placeholder * mail : configure a EmailInput and add placeholder * password : configure PasswordInput and add placeholder * profile : configure Select and add placeholder\n\nMethod signatures and docstrings:\n- def clean_mail(self): mail is validated through a regular expression. if not matched returns error\n- def clean_full_name(self): the number of characters of full_name is validated. if not matched returns error\n- def clean_password(self): the number of characters of password is validated. if not matched returns error\n\n<|skeleton|>\nclass BaseUserForm:\n \"\"\"Base form class for User. the fields are customized: * full_name : configure a textInput and add placeholder * mail : configure a EmailInput and add placeholder * password : configure PasswordInput and add placeholder * profile : configure Select and add placeholder\"\"\"\n\n def clean_mail(self):\n \"\"\"mail is validated through a regular expression. if not matched returns error\"\"\"\n <|body_0|>\n\n def clean_full_name(self):\n \"\"\"the number of characters of full_name is validated. if not matched returns error\"\"\"\n <|body_1|>\n\n def clean_password(self):\n \"\"\"the number of characters of password is validated. if not matched returns error\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mail = self.cleaned_data['mail']\n validate_mail = re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\\\.[a-zA-Z0-9-]+)*$\", mail)\n if not validate_mail:\n raise forms.ValidationError('El email debe ser valido')\n return mail\n<|end_body_0|>\n\n<|body_start_1|>\n full_name = self.cleaned_data['full_name']\n if len(full_name) < 5:\n raise forms.ValidationError('Ingrese un nombre más largo')\n return full_name\n<|end_body_1|>\n\n<|body_start_2|>\n password = self.cleaned_data['password']\n if len(password) < 8:\n raise forms.ValidationError('La contraseña debe tener un largo minimo de 8')\n return password\n<|end_body_2|>\n", "revision_id": "e31baa3d76b322df90338a074b54c4ae08a99921", "skeleton": "<|skeleton|>\nclass BaseUserForm:\n \"\"\"Base form class for User. the fields are customized: * full_name : configure a textInput and add placeholder * mail : configure a EmailInput and add placeholder * password : configure PasswordInput and add placeholder * profile : configure Select and add placeholder\"\"\"\n\n def clean_mail(self):\n \"\"\"mail is validated through a regular expression. if not matched returns error\"\"\"\n <|body_0|>\n\n def clean_full_name(self):\n \"\"\"the number of characters of full_name is validated. if not matched returns error\"\"\"\n <|body_1|>\n\n def clean_password(self):\n \"\"\"the number of characters of password is validated. if not matched returns error\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseUserForm:\n \"\"\"Base form class for User. the fields are customized: * full_name : configure a textInput and add placeholder * mail : configure a EmailInput and add placeholder * password : configure PasswordInput and add placeholder * profile : configure Select and add placeholder\"\"\"\n\n def clean_mail(self):\n \"\"\"mail is validated through a regular expression. if not matched returns error\"\"\"\n mail = self.cleaned_data['mail']\n validate_mail = re.match(\"^[a-zA-Z0-9.!#$%&'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+(?:\\\\.[a-zA-Z0-9-]+)*$\", mail)\n if not validate_mail:\n raise forms.ValidationError('El email debe ser valido')\n return mail\n\n def clean_full_name(self):\n \"\"\"the number of characters of full_name is validated. if not matched returns error\"\"\"\n full_name = self.cleaned_data['full_name']\n if len(full_name) < 5:\n raise forms.ValidationError('Ingrese un nombre más largo')\n return full_name\n\n def clean_password(self):\n \"\"\"the number of characters of password is validated. if not matched returns error\"\"\"\n password = self.cleaned_data['password']\n if len(password) < 8:\n raise forms.ValidationError('La contraseña debe tener un largo minimo de 8')\n return password\n", "source": "the_stack_v2_python_sparse", "source_path": "applications/users/forms.py", "source_repo": "OscarJara/nora-menu", "split": "test", "star_events_count": 0} {"blob_id": "b18156591ffa6a2e2378d6b77eb15b079ccba36c", "bodies": ["try:\n\n def generate(vo):\n for exception in list_exceptions(exception_id, vo=vo):\n yield (dumps(exception, cls=APIEncoder) + '\\n')\n return try_stream(generate(vo=request.environ.get('vo')))\nexcept LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\nexcept RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\nexcept Exception as error:\n print(format_exc())\n return (str(error), 500)", "json_data = request.data\ntry:\n params = loads(json_data)\nexcept ValueError:\n return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')\ntry:\n state = params['state']\nexcept KeyError:\n state = None\ntry:\n update_exception(exception_id=exception_id, state=state, issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))\nexcept UnsupportedOperation as error:\n return generate_http_error_flask(400, 'UnsupportedOperation', error.args[0])\nexcept AccessDenied as error:\n return generate_http_error_flask(401, 'AccessDenied', error.args[0])\nexcept LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\nexcept RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\nexcept Exception as error:\n print(format_exc())\n return (str(error), 500)\nreturn ('Created', 201)"], "bodies_text": "<|body_start_0|>\n try:\n\n def generate(vo):\n for exception in list_exceptions(exception_id, vo=vo):\n yield (dumps(exception, cls=APIEncoder) + '\\n')\n return try_stream(generate(vo=request.environ.get('vo')))\n except LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\n except RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\n except Exception as error:\n print(format_exc())\n return (str(error), 500)\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.data\n try:\n params = loads(json_data)\n except ValueError:\n return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')\n try:\n state = params['state']\n except KeyError:\n state = None\n try:\n update_exception(exception_id=exception_id, state=state, issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))\n except UnsupportedOperation as error:\n return generate_http_error_flask(400, 'UnsupportedOperation', error.args[0])\n except AccessDenied as error:\n return generate_http_error_flask(401, 'AccessDenied', error.args[0])\n except LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\n except RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\n except Exception as error:\n print(format_exc())\n return (str(error), 500)\n return ('Created', 201)\n<|end_body_1|>\n", "class_docstring": "REST APIs for Lifetime Model exception.", "class_name": "LifetimeExceptionId", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LifetimeExceptionId:\n \"\"\"REST APIs for Lifetime Model exception.\"\"\"\n\n def get(self, exception_id):\n \"\"\"Retrieve an exception. .. :quickref: LifetimeExceptionId; Get an exceptions. :param exception_id: The exception identifier. :resheader Content-Type: application/x-json-stream :status 200: OK. :status 401: Invalid Auth Token. :status 404: Lifetime Exception Not Found. :status 406: Not Acceptable. :status 500: Internal Error. :returns: List of exceptions.\"\"\"\n <|body_0|>\n\n def put(self, exception_id):\n \"\"\"Approve/Reject an execption. .. :quickref: LifetimeExceptionId; Approve/reject exception. :param exception_id: The exception identifier. :\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n\n def generate(vo):\n for exception in list_exceptions(exception_id, vo=vo):\n yield (dumps(exception, cls=APIEncoder) + '\\n')\n return try_stream(generate(vo=request.environ.get('vo')))\n except LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\n except RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\n except Exception as error:\n print(format_exc())\n return (str(error), 500)\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.data\n try:\n params = loads(json_data)\n except ValueError:\n return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')\n try:\n state = params['state']\n except KeyError:\n state = None\n try:\n update_exception(exception_id=exception_id, state=state, issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))\n except UnsupportedOperation as error:\n return generate_http_error_flask(400, 'UnsupportedOperation', error.args[0])\n except AccessDenied as error:\n return generate_http_error_flask(401, 'AccessDenied', error.args[0])\n except LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\n except RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\n except Exception as error:\n print(format_exc())\n return (str(error), 500)\n return ('Created', 201)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000246", "length_bytes": 8648, "license_type": "permissive", "methods": [{"docstring": "Retrieve an exception. .. :quickref: LifetimeExceptionId; Get an exceptions. :param exception_id: The exception identifier. :resheader Content-Type: application/x-json-stream :status 200: OK. :status 401: Invalid Auth Token. :status 404: Lifetime Exception Not Found. :status 406: Not Acceptable. :status 500: Internal Error. :returns: List of exceptions.", "name": "get", "signature": "def get(self, exception_id)"}, {"docstring": "Approve/Reject an execption. .. :quickref: LifetimeExceptionId; Approve/reject exception. :param exception_id: The exception identifier. :\nclass LifetimeExceptionId:\n \"\"\"REST APIs for Lifetime Model exception.\"\"\"\n\n def get(self, exception_id):\n \"\"\"Retrieve an exception. .. :quickref: LifetimeExceptionId; Get an exceptions. :param exception_id: The exception identifier. :resheader Content-Type: application/x-json-stream :status 200: OK. :status 401: Invalid Auth Token. :status 404: Lifetime Exception Not Found. :status 406: Not Acceptable. :status 500: Internal Error. :returns: List of exceptions.\"\"\"\n <|body_0|>\n\n def put(self, exception_id):\n \"\"\"Approve/Reject an execption. .. :quickref: LifetimeExceptionId; Approve/reject exception. :param exception_id: The exception identifier. :\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n\n def generate(vo):\n for exception in list_exceptions(exception_id, vo=vo):\n yield (dumps(exception, cls=APIEncoder) + '\\n')\n return try_stream(generate(vo=request.environ.get('vo')))\n except LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\n except RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\n except Exception as error:\n print(format_exc())\n return (str(error), 500)\n<|end_body_0|>\n\n<|body_start_1|>\n json_data = request.data\n try:\n params = loads(json_data)\n except ValueError:\n return generate_http_error_flask(400, 'ValueError', 'Cannot decode json parameter list')\n try:\n state = params['state']\n except KeyError:\n state = None\n try:\n update_exception(exception_id=exception_id, state=state, issuer=request.environ.get('issuer'), vo=request.environ.get('vo'))\n except UnsupportedOperation as error:\n return generate_http_error_flask(400, 'UnsupportedOperation', error.args[0])\n except AccessDenied as error:\n return generate_http_error_flask(401, 'AccessDenied', error.args[0])\n except LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\n except RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\n except Exception as error:\n print(format_exc())\n return (str(error), 500)\n return ('Created', 201)\n<|end_body_1|>\n", "revision_id": "bf33d9441d3b4ff160a392eed56724f635a03fe6", "skeleton": "<|skeleton|>\nclass LifetimeExceptionId:\n \"\"\"REST APIs for Lifetime Model exception.\"\"\"\n\n def get(self, exception_id):\n \"\"\"Retrieve an exception. .. :quickref: LifetimeExceptionId; Get an exceptions. :param exception_id: The exception identifier. :resheader Content-Type: application/x-json-stream :status 200: OK. :status 401: Invalid Auth Token. :status 404: Lifetime Exception Not Found. :status 406: Not Acceptable. :status 500: Internal Error. :returns: List of exceptions.\"\"\"\n <|body_0|>\n\n def put(self, exception_id):\n \"\"\"Approve/Reject an execption. .. :quickref: LifetimeExceptionId; Approve/reject exception. :param exception_id: The exception identifier. :\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LifetimeExceptionId:\n \"\"\"REST APIs for Lifetime Model exception.\"\"\"\n\n def get(self, exception_id):\n \"\"\"Retrieve an exception. .. :quickref: LifetimeExceptionId; Get an exceptions. :param exception_id: The exception identifier. :resheader Content-Type: application/x-json-stream :status 200: OK. :status 401: Invalid Auth Token. :status 404: Lifetime Exception Not Found. :status 406: Not Acceptable. :status 500: Internal Error. :returns: List of exceptions.\"\"\"\n try:\n\n def generate(vo):\n for exception in list_exceptions(exception_id, vo=vo):\n yield (dumps(exception, cls=APIEncoder) + '\\n')\n return try_stream(generate(vo=request.environ.get('vo')))\n except LifetimeExceptionNotFound as error:\n return generate_http_error_flask(404, 'LifetimeExceptionNotFound', error.args[0])\n except RucioException as error:\n return generate_http_error_flask(500, error.__class__.__name__, error.args[0])\n except Exception as error:\n print(format_exc())\n return (str(error), 500)\n\n def put(self, exception_id):\n \"\"\"Approve/Reject an execption. .. :quickref: LifetimeExceptionId; Approve/reject exception. :param exception_id: The exception identifier. :\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n<|end_body_1|>\n\n<|body_start_2|>\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n<|end_body_2|>\n\n<|body_start_3|>\n user = self.__session.query(User).filter_by(**kwargs).first()\n if user is None:\n raise NoResultFound\n if kwargs is None:\n raise InvalidRequestError\n return user\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n setattr(user, k, v)\n self._session.commit()\n<|end_body_4|>\n", "class_docstring": "DB class", "class_name": "DB", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DB:\n \"\"\"DB class\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new DB instance\"\"\"\n <|body_0|>\n\n def _session(self) -> Session:\n \"\"\"Memoized session object\"\"\"\n <|body_1|>\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"[add_user] method, which has two required string arguments: email and hashed_password, and returns a User object\"\"\"\n <|body_2|>\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"[find_user_by] Raises: NoResultFound: [when no results are found, or when wrong query arguments are passed] InvalidRequestError: [when no results are found, or when wrong query arguments are passed] Returns: User: [returns the first row found in the users]\"\"\"\n <|body_3|>\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"[update_user] Raises ValueError: If an argument that does not correspond to a user attribute is passed Returns: method that takes as argument a required user_id integer and arbitrary keyword arguments, and returns None\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n<|end_body_1|>\n\n<|body_start_2|>\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n<|end_body_2|>\n\n<|body_start_3|>\n user = self.__session.query(User).filter_by(**kwargs).first()\n if user is None:\n raise NoResultFound\n if kwargs is None:\n raise InvalidRequestError\n return user\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n setattr(user, k, v)\n self._session.commit()\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000247", "length_bytes": 2471, "license_type": "no_license", "methods": [{"docstring": "Initialize a new DB instance", "name": "__init__", "signature": "def __init__(self) -> None"}, {"docstring": "Memoized session object", "name": "_session", "signature": "def _session(self) -> Session"}, {"docstring": "[add_user] method, which has two required string arguments: email and hashed_password, and returns a User object", "name": "add_user", "signature": "def add_user(self, email: str, hashed_password: str) -> User"}, {"docstring": "[find_user_by] Raises: NoResultFound: [when no results are found, or when wrong query arguments are passed] InvalidRequestError: [when no results are found, or when wrong query arguments are passed] Returns: User: [returns the first row found in the users]", "name": "find_user_by", "signature": "def find_user_by(self, **kwargs) -> User"}, {"docstring": "[update_user] Raises ValueError: If an argument that does not correspond to a user attribute is passed Returns: method that takes as argument a required user_id integer and arbitrary keyword arguments, and returns None", "name": "update_user", "signature": "def update_user(self, user_id: int, **kwargs) -> None"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_037388", "prompt": "Implement the Python class `DB` described below.\n\nClass description:\nDB class\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Initialize a new DB instance\n- def _session(self) -> Session: Memoized session object\n- def add_user(self, email: str, hashed_password: str) -> User: [add_user] method, which has two required string arguments: email and hashed_password, and returns a User object\n- def find_user_by(self, **kwargs) -> User: [find_user_by] Raises: NoResultFound: [when no results are found, or when wrong query arguments are passed] InvalidRequestError: [when no results are found, or when wrong query arguments are passed] Returns: User: [returns the first row found in the users]\n- def update_user(self, user_id: int, **kwargs) -> None: [update_user] Raises ValueError: If an argument that does not correspond to a user attribute is passed Returns: method that takes as argument a required user_id integer and arbitrary keyword arguments, and returns None", "prompted_full_text": "Implement the Python class `DB` described below.\n\nClass description:\nDB class\n\nMethod signatures and docstrings:\n- def __init__(self) -> None: Initialize a new DB instance\n- def _session(self) -> Session: Memoized session object\n- def add_user(self, email: str, hashed_password: str) -> User: [add_user] method, which has two required string arguments: email and hashed_password, and returns a User object\n- def find_user_by(self, **kwargs) -> User: [find_user_by] Raises: NoResultFound: [when no results are found, or when wrong query arguments are passed] InvalidRequestError: [when no results are found, or when wrong query arguments are passed] Returns: User: [returns the first row found in the users]\n- def update_user(self, user_id: int, **kwargs) -> None: [update_user] Raises ValueError: If an argument that does not correspond to a user attribute is passed Returns: method that takes as argument a required user_id integer and arbitrary keyword arguments, and returns None\n\n<|skeleton|>\nclass DB:\n \"\"\"DB class\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new DB instance\"\"\"\n <|body_0|>\n\n def _session(self) -> Session:\n \"\"\"Memoized session object\"\"\"\n <|body_1|>\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"[add_user] method, which has two required string arguments: email and hashed_password, and returns a User object\"\"\"\n <|body_2|>\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"[find_user_by] Raises: NoResultFound: [when no results are found, or when wrong query arguments are passed] InvalidRequestError: [when no results are found, or when wrong query arguments are passed] Returns: User: [returns the first row found in the users]\"\"\"\n <|body_3|>\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"[update_user] Raises ValueError: If an argument that does not correspond to a user attribute is passed Returns: method that takes as argument a required user_id integer and arbitrary keyword arguments, and returns None\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n<|end_body_0|>\n\n<|body_start_1|>\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n<|end_body_1|>\n\n<|body_start_2|>\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n<|end_body_2|>\n\n<|body_start_3|>\n user = self.__session.query(User).filter_by(**kwargs).first()\n if user is None:\n raise NoResultFound\n if kwargs is None:\n raise InvalidRequestError\n return user\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n setattr(user, k, v)\n self._session.commit()\n<|end_body_4|>\n", "revision_id": "c3c9174b79539069d9f830381dff217e8d9688b5", "skeleton": "<|skeleton|>\nclass DB:\n \"\"\"DB class\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new DB instance\"\"\"\n <|body_0|>\n\n def _session(self) -> Session:\n \"\"\"Memoized session object\"\"\"\n <|body_1|>\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"[add_user] method, which has two required string arguments: email and hashed_password, and returns a User object\"\"\"\n <|body_2|>\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"[find_user_by] Raises: NoResultFound: [when no results are found, or when wrong query arguments are passed] InvalidRequestError: [when no results are found, or when wrong query arguments are passed] Returns: User: [returns the first row found in the users]\"\"\"\n <|body_3|>\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"[update_user] Raises ValueError: If an argument that does not correspond to a user attribute is passed Returns: method that takes as argument a required user_id integer and arbitrary keyword arguments, and returns None\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DB:\n \"\"\"DB class\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize a new DB instance\"\"\"\n self._engine = create_engine('sqlite:///a.db', echo=False)\n Base.metadata.drop_all(self._engine)\n Base.metadata.create_all(self._engine)\n self.__session = None\n\n def _session(self) -> Session:\n \"\"\"Memoized session object\"\"\"\n if self.__session is None:\n DBSession = sessionmaker(bind=self._engine)\n self.__session = DBSession()\n return self.__session\n\n def add_user(self, email: str, hashed_password: str) -> User:\n \"\"\"[add_user] method, which has two required string arguments: email and hashed_password, and returns a User object\"\"\"\n user = User(email=email, hashed_password=hashed_password)\n self._session.add(user)\n self._session.commit()\n return user\n\n def find_user_by(self, **kwargs) -> User:\n \"\"\"[find_user_by] Raises: NoResultFound: [when no results are found, or when wrong query arguments are passed] InvalidRequestError: [when no results are found, or when wrong query arguments are passed] Returns: User: [returns the first row found in the users]\"\"\"\n user = self.__session.query(User).filter_by(**kwargs).first()\n if user is None:\n raise NoResultFound\n if kwargs is None:\n raise InvalidRequestError\n return user\n\n def update_user(self, user_id: int, **kwargs) -> None:\n \"\"\"[update_user] Raises ValueError: If an argument that does not correspond to a user attribute is passed Returns: method that takes as argument a required user_id integer and arbitrary keyword arguments, and returns None\"\"\"\n user = self.find_user_by(id=user_id)\n for k, v in kwargs.items():\n if not hasattr(user, k):\n raise ValueError\n setattr(user, k, v)\n self._session.commit()\n", "source": "the_stack_v2_python_sparse", "source_path": "0x08-user_authentication_service/db.py", "source_repo": "usfbelhadj/holbertonschool-web_back_end", "split": "test", "star_events_count": 0} {"blob_id": "945a70860fe757229246716a98013de787c4c0da", "bodies": ["for prog, _, func in self.lexmap:\n mo = prog.match(self.txt, self.pos)\n if mo:\n column = mo.start() - self.line_start\n length = mo.end() - mo.start()\n loc = SourceLocation(self.filename, self.line, column, length)\n self.pos = mo.end()\n val = mo.group(0)\n if '\\n' in val:\n self.line += val.count('\\n')\n self.line_start = mo.start()\n res = func(self, val)\n if res:\n typ, val = res\n return Token(typ, val, loc)\n else:\n return\nchar = self.txt[self.pos]\ncolumn = self.pos - self.line_start\nloc = SourceLocation(self.filename, self.line, column, 1)\nraise CompilerError('Unexpected char: {0} (0x{1:X})'.format(char, ord(char)), loc=loc)", "self.line = 1\nself.line_start = 0\nself.pos = 0\nself.txt = txt\nwhile len(txt) != self.pos:\n tok = self.gettok()\n if tok:\n yield tok\nif eof:\n loc = SourceLocation(self.filename, self.line, 0, 0)\n yield Token(EOF, EOF, loc)"], "bodies_text": "<|body_start_0|>\n for prog, _, func in self.lexmap:\n mo = prog.match(self.txt, self.pos)\n if mo:\n column = mo.start() - self.line_start\n length = mo.end() - mo.start()\n loc = SourceLocation(self.filename, self.line, column, length)\n self.pos = mo.end()\n val = mo.group(0)\n if '\\n' in val:\n self.line += val.count('\\n')\n self.line_start = mo.start()\n res = func(self, val)\n if res:\n typ, val = res\n return Token(typ, val, loc)\n else:\n return\n char = self.txt[self.pos]\n column = self.pos - self.line_start\n loc = SourceLocation(self.filename, self.line, column, 1)\n raise CompilerError('Unexpected char: {0} (0x{1:X})'.format(char, ord(char)), loc=loc)\n<|end_body_0|>\n\n<|body_start_1|>\n self.line = 1\n self.line_start = 0\n self.pos = 0\n self.txt = txt\n while len(txt) != self.pos:\n tok = self.gettok()\n if tok:\n yield tok\n if eof:\n loc = SourceLocation(self.filename, self.line, 0, 0)\n yield Token(EOF, EOF, loc)\n<|end_body_1|>\n", "class_docstring": "Simple class for lexing. Use this class by subclassing it and decorating handler methods with the 'on' function.", "class_name": "SimpleLexer", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SimpleLexer:\n \"\"\"Simple class for lexing. Use this class by subclassing it and decorating handler methods with the 'on' function.\"\"\"\n\n def gettok(self):\n \"\"\"Find a match at the given position\"\"\"\n <|body_0|>\n\n def tokenize(self, txt, eof=False):\n \"\"\"Generator that generates lexical tokens from text. Optionally yield the EOF token.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for prog, _, func in self.lexmap:\n mo = prog.match(self.txt, self.pos)\n if mo:\n column = mo.start() - self.line_start\n length = mo.end() - mo.start()\n loc = SourceLocation(self.filename, self.line, column, length)\n self.pos = mo.end()\n val = mo.group(0)\n if '\\n' in val:\n self.line += val.count('\\n')\n self.line_start = mo.start()\n res = func(self, val)\n if res:\n typ, val = res\n return Token(typ, val, loc)\n else:\n return\n char = self.txt[self.pos]\n column = self.pos - self.line_start\n loc = SourceLocation(self.filename, self.line, column, 1)\n raise CompilerError('Unexpected char: {0} (0x{1:X})'.format(char, ord(char)), loc=loc)\n<|end_body_0|>\n\n<|body_start_1|>\n self.line = 1\n self.line_start = 0\n self.pos = 0\n self.txt = txt\n while len(txt) != self.pos:\n tok = self.gettok()\n if tok:\n yield tok\n if eof:\n loc = SourceLocation(self.filename, self.line, 0, 0)\n yield Token(EOF, EOF, loc)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000248", "length_bytes": 5205, "license_type": "permissive", "methods": [{"docstring": "Find a match at the given position", "name": "gettok", "signature": "def gettok(self)"}, {"docstring": "Generator that generates lexical tokens from text. Optionally yield the EOF token.", "name": "tokenize", "signature": "def tokenize(self, txt, eof=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001832", "prompt": "Implement the Python class `SimpleLexer` described below.\n\nClass description:\nSimple class for lexing. Use this class by subclassing it and decorating handler methods with the 'on' function.\n\nMethod signatures and docstrings:\n- def gettok(self): Find a match at the given position\n- def tokenize(self, txt, eof=False): Generator that generates lexical tokens from text. Optionally yield the EOF token.", "prompted_full_text": "Implement the Python class `SimpleLexer` described below.\n\nClass description:\nSimple class for lexing. Use this class by subclassing it and decorating handler methods with the 'on' function.\n\nMethod signatures and docstrings:\n- def gettok(self): Find a match at the given position\n- def tokenize(self, txt, eof=False): Generator that generates lexical tokens from text. Optionally yield the EOF token.\n\n<|skeleton|>\nclass SimpleLexer:\n \"\"\"Simple class for lexing. Use this class by subclassing it and decorating handler methods with the 'on' function.\"\"\"\n\n def gettok(self):\n \"\"\"Find a match at the given position\"\"\"\n <|body_0|>\n\n def tokenize(self, txt, eof=False):\n \"\"\"Generator that generates lexical tokens from text. Optionally yield the EOF token.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n for prog, _, func in self.lexmap:\n mo = prog.match(self.txt, self.pos)\n if mo:\n column = mo.start() - self.line_start\n length = mo.end() - mo.start()\n loc = SourceLocation(self.filename, self.line, column, length)\n self.pos = mo.end()\n val = mo.group(0)\n if '\\n' in val:\n self.line += val.count('\\n')\n self.line_start = mo.start()\n res = func(self, val)\n if res:\n typ, val = res\n return Token(typ, val, loc)\n else:\n return\n char = self.txt[self.pos]\n column = self.pos - self.line_start\n loc = SourceLocation(self.filename, self.line, column, 1)\n raise CompilerError('Unexpected char: {0} (0x{1:X})'.format(char, ord(char)), loc=loc)\n<|end_body_0|>\n\n<|body_start_1|>\n self.line = 1\n self.line_start = 0\n self.pos = 0\n self.txt = txt\n while len(txt) != self.pos:\n tok = self.gettok()\n if tok:\n yield tok\n if eof:\n loc = SourceLocation(self.filename, self.line, 0, 0)\n yield Token(EOF, EOF, loc)\n<|end_body_1|>\n", "revision_id": "ba0840bc5f4ffd889f882a814fb26f88cd854379", "skeleton": "<|skeleton|>\nclass SimpleLexer:\n \"\"\"Simple class for lexing. Use this class by subclassing it and decorating handler methods with the 'on' function.\"\"\"\n\n def gettok(self):\n \"\"\"Find a match at the given position\"\"\"\n <|body_0|>\n\n def tokenize(self, txt, eof=False):\n \"\"\"Generator that generates lexical tokens from text. Optionally yield the EOF token.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SimpleLexer:\n \"\"\"Simple class for lexing. Use this class by subclassing it and decorating handler methods with the 'on' function.\"\"\"\n\n def gettok(self):\n \"\"\"Find a match at the given position\"\"\"\n for prog, _, func in self.lexmap:\n mo = prog.match(self.txt, self.pos)\n if mo:\n column = mo.start() - self.line_start\n length = mo.end() - mo.start()\n loc = SourceLocation(self.filename, self.line, column, length)\n self.pos = mo.end()\n val = mo.group(0)\n if '\\n' in val:\n self.line += val.count('\\n')\n self.line_start = mo.start()\n res = func(self, val)\n if res:\n typ, val = res\n return Token(typ, val, loc)\n else:\n return\n char = self.txt[self.pos]\n column = self.pos - self.line_start\n loc = SourceLocation(self.filename, self.line, column, 1)\n raise CompilerError('Unexpected char: {0} (0x{1:X})'.format(char, ord(char)), loc=loc)\n\n def tokenize(self, txt, eof=False):\n \"\"\"Generator that generates lexical tokens from text. Optionally yield the EOF token.\"\"\"\n self.line = 1\n self.line_start = 0\n self.pos = 0\n self.txt = txt\n while len(txt) != self.pos:\n tok = self.gettok()\n if tok:\n yield tok\n if eof:\n loc = SourceLocation(self.filename, self.line, 0, 0)\n yield Token(EOF, EOF, loc)\n", "source": "the_stack_v2_python_sparse", "source_path": "ppci/lang/tools/baselex.py", "source_repo": "obround/ppci", "split": "test", "star_events_count": 0} {"blob_id": "6962142da7b2361a835fc91b7e87eadd68240811", "bodies": ["profit = 0\nfor i in range(1, len(prices)):\n d = prices[i] - prices[i - 1]\n if d > 0:\n profit += d\nreturn profit", "if not prices:\n return 0\nhold = float('-inf')\nnot_hold = 0\nfor p in prices:\n hold = max(hold, not_hold - p)\n not_hold = max(not_hold, hold + p)\nreturn not_hold"], "bodies_text": "<|body_start_0|>\n profit = 0\n for i in range(1, len(prices)):\n d = prices[i] - prices[i - 1]\n if d > 0:\n profit += d\n return profit\n<|end_body_0|>\n\n<|body_start_1|>\n if not prices:\n return 0\n hold = float('-inf')\n not_hold = 0\n for p in prices:\n hold = max(hold, not_hold - p)\n not_hold = max(not_hold, hold + p)\n return not_hold\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n profit = 0\n for i in range(1, len(prices)):\n d = prices[i] - prices[i - 1]\n if d > 0:\n profit += d\n return profit\n<|end_body_0|>\n\n<|body_start_1|>\n if not prices:\n return 0\n hold = float('-inf')\n not_hold = 0\n for p in prices:\n hold = max(hold, not_hold - p)\n not_hold = max(not_hold, hold + p)\n return not_hold\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000249", "length_bytes": 733, "license_type": "no_license", "methods": [{"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfit", "signature": "def maxProfit(self, prices)"}, {"docstring": ":type prices: List[int] :rtype: int", "name": "maxProfit2", "signature": "def maxProfit2(self, prices)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034760", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit2(self, prices): :type prices: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxProfit(self, prices): :type prices: List[int] :rtype: int\n- def maxProfit2(self, prices): :type prices: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n profit = 0\n for i in range(1, len(prices)):\n d = prices[i] - prices[i - 1]\n if d > 0:\n profit += d\n return profit\n<|end_body_0|>\n\n<|body_start_1|>\n if not prices:\n return 0\n hold = float('-inf')\n not_hold = 0\n for p in prices:\n hold = max(hold, not_hold - p)\n not_hold = max(not_hold, hold + p)\n return not_hold\n<|end_body_1|>\n", "revision_id": "c0c76a9d56a484d2513fadbd1593342ee7fbe352", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def maxProfit2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def maxProfit(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n profit = 0\n for i in range(1, len(prices)):\n d = prices[i] - prices[i - 1]\n if d > 0:\n profit += d\n return profit\n\n def maxProfit2(self, prices):\n \"\"\":type prices: List[int] :rtype: int\"\"\"\n if not prices:\n return 0\n hold = float('-inf')\n not_hold = 0\n for p in prices:\n hold = max(hold, not_hold - p)\n not_hold = max(not_hold, hold + p)\n return not_hold\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/0122.BestTimetoBuyandSellStockII.py", "source_repo": "Hijiao/algorithms", "split": "test", "star_events_count": 0} {"blob_id": "9ab6630bfa52fae1c237f226a85986fcc9195da5", "bodies": ["super().__init__(name=name)\nself._listen_ip = listen_ip\nself._listen_port = listen_port\nself.__listen_server = socket.socket()\nself.__listen_server.bind((listen_ip, listen_port))\nself.__can_run = False", "while self.__can_run:\n conn, add = self.__listen_server.accept()\n analyze_msg_thread = Thread(target=analyze_message, args=(conn,))\n analyze_msg_thread.start()", "LOG.info('启动监听线程, 监听地址: {0}:{1}'.format(self._listen_ip, self._listen_port))\nself.__listen_server.listen()\nself.__can_run = True\nself.__run()", "LOG.info('停止监听线程')\nself.__listen_server.close()\nself.__can_run = False"], "bodies_text": "<|body_start_0|>\n super().__init__(name=name)\n self._listen_ip = listen_ip\n self._listen_port = listen_port\n self.__listen_server = socket.socket()\n self.__listen_server.bind((listen_ip, listen_port))\n self.__can_run = False\n<|end_body_0|>\n\n<|body_start_1|>\n while self.__can_run:\n conn, add = self.__listen_server.accept()\n analyze_msg_thread = Thread(target=analyze_message, args=(conn,))\n analyze_msg_thread.start()\n<|end_body_1|>\n\n<|body_start_2|>\n LOG.info('启动监听线程, 监听地址: {0}:{1}'.format(self._listen_ip, self._listen_port))\n self.__listen_server.listen()\n self.__can_run = True\n self.__run()\n<|end_body_2|>\n\n<|body_start_3|>\n LOG.info('停止监听线程')\n self.__listen_server.close()\n self.__can_run = False\n<|end_body_3|>\n", "class_docstring": "后台监听线程. 当代理服务器配置后,会启动线程,监听来自客户端的消息请求. 在监听线程中并不会对客户端请求直接处理,而是转交给单独的处理线程处理.", "class_name": "ListenThread", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ListenThread:\n \"\"\"后台监听线程. 当代理服务器配置后,会启动线程,监听来自客户端的消息请求. 在监听线程中并不会对客户端请求直接处理,而是转交给单独的处理线程处理.\"\"\"\n\n def __init__(self, listen_ip='', listen_port=8088, name=None):\n \"\"\"对象初始化函数 :param listen_ip: 指定要监听的IP :param listen_port: 指定监听的端口 :param name: 指定线程的名称\"\"\"\n <|body_0|>\n\n def __run(self):\n \"\"\"监听来自客户端发起的连接,和接收消息并启动新的线程处理消息.\"\"\"\n <|body_1|>\n\n def start(self):\n \"\"\"启动监听线程\"\"\"\n <|body_2|>\n\n def stop(self):\n \"\"\"停止监听线程\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(name=name)\n self._listen_ip = listen_ip\n self._listen_port = listen_port\n self.__listen_server = socket.socket()\n self.__listen_server.bind((listen_ip, listen_port))\n self.__can_run = False\n<|end_body_0|>\n\n<|body_start_1|>\n while self.__can_run:\n conn, add = self.__listen_server.accept()\n analyze_msg_thread = Thread(target=analyze_message, args=(conn,))\n analyze_msg_thread.start()\n<|end_body_1|>\n\n<|body_start_2|>\n LOG.info('启动监听线程, 监听地址: {0}:{1}'.format(self._listen_ip, self._listen_port))\n self.__listen_server.listen()\n self.__can_run = True\n self.__run()\n<|end_body_2|>\n\n<|body_start_3|>\n LOG.info('停止监听线程')\n self.__listen_server.close()\n self.__can_run = False\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000250", "length_bytes": 16601, "license_type": "permissive", "methods": [{"docstring": "对象初始化函数 :param listen_ip: 指定要监听的IP :param listen_port: 指定监听的端口 :param name: 指定线程的名称", "name": "__init__", "signature": "def __init__(self, listen_ip='', listen_port=8088, name=None)"}, {"docstring": "监听来自客户端发起的连接,和接收消息并启动新的线程处理消息.", "name": "__run", "signature": "def __run(self)"}, {"docstring": "启动监听线程", "name": "start", "signature": "def start(self)"}, {"docstring": "停止监听线程", "name": "stop", "signature": "def stop(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_051173", "prompt": "Implement the Python class `ListenThread` described below.\n\nClass description:\n后台监听线程. 当代理服务器配置后,会启动线程,监听来自客户端的消息请求. 在监听线程中并不会对客户端请求直接处理,而是转交给单独的处理线程处理.\n\nMethod signatures and docstrings:\n- def __init__(self, listen_ip='', listen_port=8088, name=None): 对象初始化函数 :param listen_ip: 指定要监听的IP :param listen_port: 指定监听的端口 :param name: 指定线程的名称\n- def __run(self): 监听来自客户端发起的连接,和接收消息并启动新的线程处理消息.\n- def start(self): 启动监听线程\n- def stop(self): 停止监听线程", "prompted_full_text": "Implement the Python class `ListenThread` described below.\n\nClass description:\n后台监听线程. 当代理服务器配置后,会启动线程,监听来自客户端的消息请求. 在监听线程中并不会对客户端请求直接处理,而是转交给单独的处理线程处理.\n\nMethod signatures and docstrings:\n- def __init__(self, listen_ip='', listen_port=8088, name=None): 对象初始化函数 :param listen_ip: 指定要监听的IP :param listen_port: 指定监听的端口 :param name: 指定线程的名称\n- def __run(self): 监听来自客户端发起的连接,和接收消息并启动新的线程处理消息.\n- def start(self): 启动监听线程\n- def stop(self): 停止监听线程\n\n<|skeleton|>\nclass ListenThread:\n \"\"\"后台监听线程. 当代理服务器配置后,会启动线程,监听来自客户端的消息请求. 在监听线程中并不会对客户端请求直接处理,而是转交给单独的处理线程处理.\"\"\"\n\n def __init__(self, listen_ip='', listen_port=8088, name=None):\n \"\"\"对象初始化函数 :param listen_ip: 指定要监听的IP :param listen_port: 指定监听的端口 :param name: 指定线程的名称\"\"\"\n <|body_0|>\n\n def __run(self):\n \"\"\"监听来自客户端发起的连接,和接收消息并启动新的线程处理消息.\"\"\"\n <|body_1|>\n\n def start(self):\n \"\"\"启动监听线程\"\"\"\n <|body_2|>\n\n def stop(self):\n \"\"\"停止监听线程\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(name=name)\n self._listen_ip = listen_ip\n self._listen_port = listen_port\n self.__listen_server = socket.socket()\n self.__listen_server.bind((listen_ip, listen_port))\n self.__can_run = False\n<|end_body_0|>\n\n<|body_start_1|>\n while self.__can_run:\n conn, add = self.__listen_server.accept()\n analyze_msg_thread = Thread(target=analyze_message, args=(conn,))\n analyze_msg_thread.start()\n<|end_body_1|>\n\n<|body_start_2|>\n LOG.info('启动监听线程, 监听地址: {0}:{1}'.format(self._listen_ip, self._listen_port))\n self.__listen_server.listen()\n self.__can_run = True\n self.__run()\n<|end_body_2|>\n\n<|body_start_3|>\n LOG.info('停止监听线程')\n self.__listen_server.close()\n self.__can_run = False\n<|end_body_3|>\n", "revision_id": "7ce2ca5183b222fe6cee0ba64171ea835fc62342", "skeleton": "<|skeleton|>\nclass ListenThread:\n \"\"\"后台监听线程. 当代理服务器配置后,会启动线程,监听来自客户端的消息请求. 在监听线程中并不会对客户端请求直接处理,而是转交给单独的处理线程处理.\"\"\"\n\n def __init__(self, listen_ip='', listen_port=8088, name=None):\n \"\"\"对象初始化函数 :param listen_ip: 指定要监听的IP :param listen_port: 指定监听的端口 :param name: 指定线程的名称\"\"\"\n <|body_0|>\n\n def __run(self):\n \"\"\"监听来自客户端发起的连接,和接收消息并启动新的线程处理消息.\"\"\"\n <|body_1|>\n\n def start(self):\n \"\"\"启动监听线程\"\"\"\n <|body_2|>\n\n def stop(self):\n \"\"\"停止监听线程\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ListenThread:\n \"\"\"后台监听线程. 当代理服务器配置后,会启动线程,监听来自客户端的消息请求. 在监听线程中并不会对客户端请求直接处理,而是转交给单独的处理线程处理.\"\"\"\n\n def __init__(self, listen_ip='', listen_port=8088, name=None):\n \"\"\"对象初始化函数 :param listen_ip: 指定要监听的IP :param listen_port: 指定监听的端口 :param name: 指定线程的名称\"\"\"\n super().__init__(name=name)\n self._listen_ip = listen_ip\n self._listen_port = listen_port\n self.__listen_server = socket.socket()\n self.__listen_server.bind((listen_ip, listen_port))\n self.__can_run = False\n\n def __run(self):\n \"\"\"监听来自客户端发起的连接,和接收消息并启动新的线程处理消息.\"\"\"\n while self.__can_run:\n conn, add = self.__listen_server.accept()\n analyze_msg_thread = Thread(target=analyze_message, args=(conn,))\n analyze_msg_thread.start()\n\n def start(self):\n \"\"\"启动监听线程\"\"\"\n LOG.info('启动监听线程, 监听地址: {0}:{1}'.format(self._listen_ip, self._listen_port))\n self.__listen_server.listen()\n self.__can_run = True\n self.__run()\n\n def stop(self):\n \"\"\"停止监听线程\"\"\"\n LOG.info('停止监听线程')\n self.__listen_server.close()\n self.__can_run = False\n", "source": "the_stack_v2_python_sparse", "source_path": "server/ProxyServer.py", "source_repo": "Grant555/utilities-python", "split": "test", "star_events_count": 0} {"blob_id": "895e303938d5de3c83894a7c6c9d0449aaa3516d", "bodies": ["data = request.get_json()\nif not data:\n return ({'message': 'Please provide the required details', 'status': 400}, 400)\nlocation = data['location']\ntopic = data['topic']\nhappeningOn = data['happeningOn']\nif not location or location.isspace():\n return ({'message': 'location must be provided', 'status': 400}, 400)\nif not topic or topic.isspace():\n return ({'message': 'topic must be provided', 'status': 400}, 400)\nif not happeningOn or happeningOn.isspace():\n return ({'message': 'happeningOn must be provided', 'status': 400}, 400)\nif meetup.check_meetup(topic):\n return ({'message': 'meetup already exists', 'status': 400}, 400)\nmeetup_record = meetup.create_meetup(location, topic, happeningOn)\nif meetup_record:\n return ({'status': 201, 'data': meetup_record, 'message': 'Meetup posted sucessfully'}, 201)\nreturn ({'message': 'Meetup failed to post'}, 400)", "meetups = meetup.get_all_meetups()\nif meetups:\n return ({'status': 200, 'data': meetups, 'message': 'These are the available meetups'}, 200)\nreturn ({'message': 'No meetup found', 'status': 404}, 404)"], "bodies_text": "<|body_start_0|>\n data = request.get_json()\n if not data:\n return ({'message': 'Please provide the required details', 'status': 400}, 400)\n location = data['location']\n topic = data['topic']\n happeningOn = data['happeningOn']\n if not location or location.isspace():\n return ({'message': 'location must be provided', 'status': 400}, 400)\n if not topic or topic.isspace():\n return ({'message': 'topic must be provided', 'status': 400}, 400)\n if not happeningOn or happeningOn.isspace():\n return ({'message': 'happeningOn must be provided', 'status': 400}, 400)\n if meetup.check_meetup(topic):\n return ({'message': 'meetup already exists', 'status': 400}, 400)\n meetup_record = meetup.create_meetup(location, topic, happeningOn)\n if meetup_record:\n return ({'status': 201, 'data': meetup_record, 'message': 'Meetup posted sucessfully'}, 201)\n return ({'message': 'Meetup failed to post'}, 400)\n<|end_body_0|>\n\n<|body_start_1|>\n meetups = meetup.get_all_meetups()\n if meetups:\n return ({'status': 200, 'data': meetups, 'message': 'These are the available meetups'}, 200)\n return ({'message': 'No meetup found', 'status': 404}, 404)\n<|end_body_1|>\n", "class_docstring": "Endpoint for all meetups functionality", "class_name": "MeetupsEndpoints", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MeetupsEndpoints:\n \"\"\"Endpoint for all meetups functionality\"\"\"\n\n def post(self):\n \"\"\"This endpoint creates a meetup record\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"Endpoint for geting all meetup records\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.get_json()\n if not data:\n return ({'message': 'Please provide the required details', 'status': 400}, 400)\n location = data['location']\n topic = data['topic']\n happeningOn = data['happeningOn']\n if not location or location.isspace():\n return ({'message': 'location must be provided', 'status': 400}, 400)\n if not topic or topic.isspace():\n return ({'message': 'topic must be provided', 'status': 400}, 400)\n if not happeningOn or happeningOn.isspace():\n return ({'message': 'happeningOn must be provided', 'status': 400}, 400)\n if meetup.check_meetup(topic):\n return ({'message': 'meetup already exists', 'status': 400}, 400)\n meetup_record = meetup.create_meetup(location, topic, happeningOn)\n if meetup_record:\n return ({'status': 201, 'data': meetup_record, 'message': 'Meetup posted sucessfully'}, 201)\n return ({'message': 'Meetup failed to post'}, 400)\n<|end_body_0|>\n\n<|body_start_1|>\n meetups = meetup.get_all_meetups()\n if meetups:\n return ({'status': 200, 'data': meetups, 'message': 'These are the available meetups'}, 200)\n return ({'message': 'No meetup found', 'status': 404}, 404)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000251", "length_bytes": 2956, "license_type": "permissive", "methods": [{"docstring": "This endpoint creates a meetup record", "name": "post", "signature": "def post(self)"}, {"docstring": "Endpoint for geting all meetup records", "name": "get", "signature": "def get(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000282", "prompt": "Implement the Python class `MeetupsEndpoints` described below.\n\nClass description:\nEndpoint for all meetups functionality\n\nMethod signatures and docstrings:\n- def post(self): This endpoint creates a meetup record\n- def get(self): Endpoint for geting all meetup records", "prompted_full_text": "Implement the Python class `MeetupsEndpoints` described below.\n\nClass description:\nEndpoint for all meetups functionality\n\nMethod signatures and docstrings:\n- def post(self): This endpoint creates a meetup record\n- def get(self): Endpoint for geting all meetup records\n\n<|skeleton|>\nclass MeetupsEndpoints:\n \"\"\"Endpoint for all meetups functionality\"\"\"\n\n def post(self):\n \"\"\"This endpoint creates a meetup record\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"Endpoint for geting all meetup records\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.get_json()\n if not data:\n return ({'message': 'Please provide the required details', 'status': 400}, 400)\n location = data['location']\n topic = data['topic']\n happeningOn = data['happeningOn']\n if not location or location.isspace():\n return ({'message': 'location must be provided', 'status': 400}, 400)\n if not topic or topic.isspace():\n return ({'message': 'topic must be provided', 'status': 400}, 400)\n if not happeningOn or happeningOn.isspace():\n return ({'message': 'happeningOn must be provided', 'status': 400}, 400)\n if meetup.check_meetup(topic):\n return ({'message': 'meetup already exists', 'status': 400}, 400)\n meetup_record = meetup.create_meetup(location, topic, happeningOn)\n if meetup_record:\n return ({'status': 201, 'data': meetup_record, 'message': 'Meetup posted sucessfully'}, 201)\n return ({'message': 'Meetup failed to post'}, 400)\n<|end_body_0|>\n\n<|body_start_1|>\n meetups = meetup.get_all_meetups()\n if meetups:\n return ({'status': 200, 'data': meetups, 'message': 'These are the available meetups'}, 200)\n return ({'message': 'No meetup found', 'status': 404}, 404)\n<|end_body_1|>\n", "revision_id": "e1440ce75a52757278b48b19223f81b95f0b1eee", "skeleton": "<|skeleton|>\nclass MeetupsEndpoints:\n \"\"\"Endpoint for all meetups functionality\"\"\"\n\n def post(self):\n \"\"\"This endpoint creates a meetup record\"\"\"\n <|body_0|>\n\n def get(self):\n \"\"\"Endpoint for geting all meetup records\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MeetupsEndpoints:\n \"\"\"Endpoint for all meetups functionality\"\"\"\n\n def post(self):\n \"\"\"This endpoint creates a meetup record\"\"\"\n data = request.get_json()\n if not data:\n return ({'message': 'Please provide the required details', 'status': 400}, 400)\n location = data['location']\n topic = data['topic']\n happeningOn = data['happeningOn']\n if not location or location.isspace():\n return ({'message': 'location must be provided', 'status': 400}, 400)\n if not topic or topic.isspace():\n return ({'message': 'topic must be provided', 'status': 400}, 400)\n if not happeningOn or happeningOn.isspace():\n return ({'message': 'happeningOn must be provided', 'status': 400}, 400)\n if meetup.check_meetup(topic):\n return ({'message': 'meetup already exists', 'status': 400}, 400)\n meetup_record = meetup.create_meetup(location, topic, happeningOn)\n if meetup_record:\n return ({'status': 201, 'data': meetup_record, 'message': 'Meetup posted sucessfully'}, 201)\n return ({'message': 'Meetup failed to post'}, 400)\n\n def get(self):\n \"\"\"Endpoint for geting all meetup records\"\"\"\n meetups = meetup.get_all_meetups()\n if meetups:\n return ({'status': 200, 'data': meetups, 'message': 'These are the available meetups'}, 200)\n return ({'message': 'No meetup found', 'status': 404}, 404)\n", "source": "the_stack_v2_python_sparse", "source_path": "app/api/v2/views/meetups.py", "source_repo": "MaggieKimani1/QUESTIONER-API-V2", "split": "test", "star_events_count": 0} {"blob_id": "affe38a5d9fe6eec60007447d21ca353245f4f56", "bodies": ["super(Print, self).__init__(source_info)\nself.tensor = tensor\nself.reshape = reshape", "if TikDebug.tik_debug:\n return\nself.dump_tensor(context, self.tensor, self.reshape, self.source_info)", "value = np_buf\nmin_slice_extent = 1\nt_indice = Print.get_t_indice(context, raw_indice)\nbuf = value.buffer\nif len(t_indice) != len(value.buffer.shape):\n buf = buf.reshape(-1)\n lenght = len(buf)\n first_slice = t_indice[0]\n start_ = first_slice.start\n end_ = lenght\n if first_slice.stop - first_slice.start > min_slice_extent:\n end_ = first_slice.stop\n t_indice = tuple([slice(start_, end_)])\nbuf = buf.__getitem__(t_indice)\nif reshape is not None:\n print(t_indice)\n try:\n buf = buf.reshape(reshape)\n except ValueError as exc:\n print(exc)\nreturn buf", "indice = []\nfor rid in raw_indice:\n if isinstance(rid, slice):\n ri_start = context.evaluate_expr(rid.start)\n ri_step = context.evaluate_expr(rid.step)\n ri_stop = context.evaluate_expr(rid.stop)\n rid = slice(ri_start, ri_stop, ri_step)\n else:\n rid = context.evaluate_expr(rid)\n indice.append(rid)\nt_indice = tuple(indice)\nreturn t_indice", "np_buf = context.get_value(tensor)\nraw_indice = tensor.indice.indice\nbuf = Print.get_tensor(context, np_buf, raw_indice, reshape)\nif source_info:\n print(source_info)\nprint(tensor.name + '.data (id:{}):\\n'.format(id(buf)) + str(buf))\nprint(tensor.name + '.shape:' + str(buf.shape) + ' dtype=' + str(buf.dtype))"], "bodies_text": "<|body_start_0|>\n super(Print, self).__init__(source_info)\n self.tensor = tensor\n self.reshape = reshape\n<|end_body_0|>\n\n<|body_start_1|>\n if TikDebug.tik_debug:\n return\n self.dump_tensor(context, self.tensor, self.reshape, self.source_info)\n<|end_body_1|>\n\n<|body_start_2|>\n value = np_buf\n min_slice_extent = 1\n t_indice = Print.get_t_indice(context, raw_indice)\n buf = value.buffer\n if len(t_indice) != len(value.buffer.shape):\n buf = buf.reshape(-1)\n lenght = len(buf)\n first_slice = t_indice[0]\n start_ = first_slice.start\n end_ = lenght\n if first_slice.stop - first_slice.start > min_slice_extent:\n end_ = first_slice.stop\n t_indice = tuple([slice(start_, end_)])\n buf = buf.__getitem__(t_indice)\n if reshape is not None:\n print(t_indice)\n try:\n buf = buf.reshape(reshape)\n except ValueError as exc:\n print(exc)\n return buf\n<|end_body_2|>\n\n<|body_start_3|>\n indice = []\n for rid in raw_indice:\n if isinstance(rid, slice):\n ri_start = context.evaluate_expr(rid.start)\n ri_step = context.evaluate_expr(rid.step)\n ri_stop = context.evaluate_expr(rid.stop)\n rid = slice(ri_start, ri_stop, ri_step)\n else:\n rid = context.evaluate_expr(rid)\n indice.append(rid)\n t_indice = tuple(indice)\n return t_indice\n<|end_body_3|>\n\n<|body_start_4|>\n np_buf = context.get_value(tensor)\n raw_indice = tensor.indice.indice\n buf = Print.get_tensor(context, np_buf, raw_indice, reshape)\n if source_info:\n print(source_info)\n print(tensor.name + '.data (id:{}):\\n'.format(id(buf)) + str(buf))\n print(tensor.name + '.shape:' + str(buf.shape) + ' dtype=' + str(buf.dtype))\n<|end_body_4|>\n", "class_docstring": "Class Print inherits from Print", "class_name": "Print", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Print:\n \"\"\"Class Print inherits from Print\"\"\"\n\n def __init__(self, tensor, reshape, source_info):\n \"\"\"Initialize class Print Parameters ---------- tensor: a type of Tensor reshape:reshape the buffer source_info:source code information It represents the relationship of current node with source code Returns ---------- No returns\"\"\"\n <|body_0|>\n\n def eval_(self, context):\n \"\"\"Eval function evaluate all of self.function Parameters ---------- context:information of debugger store all of debugger's information Returns ---------- None\"\"\"\n <|body_1|>\n\n def get_tensor(context, np_buf, raw_indice, reshape=None):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information np_buf:NumpyBuffer raw_indice: index of raw reshape:None Returns ---------- buf:NumpyBuffer\"\"\"\n <|body_2|>\n\n def get_t_indice(context, raw_indice):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information raw_indice: index of raw Returns ---------- t_indice\"\"\"\n <|body_3|>\n\n def dump_tensor(context, tensor, reshape=None, source_info=None):\n \"\"\"To dump the given tensor Parameters --------- context:context:information of debugger store all of debugger's information tensor: a type of Tensor reshape:None source_info:None Returns ---------- No returns\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Print, self).__init__(source_info)\n self.tensor = tensor\n self.reshape = reshape\n<|end_body_0|>\n\n<|body_start_1|>\n if TikDebug.tik_debug:\n return\n self.dump_tensor(context, self.tensor, self.reshape, self.source_info)\n<|end_body_1|>\n\n<|body_start_2|>\n value = np_buf\n min_slice_extent = 1\n t_indice = Print.get_t_indice(context, raw_indice)\n buf = value.buffer\n if len(t_indice) != len(value.buffer.shape):\n buf = buf.reshape(-1)\n lenght = len(buf)\n first_slice = t_indice[0]\n start_ = first_slice.start\n end_ = lenght\n if first_slice.stop - first_slice.start > min_slice_extent:\n end_ = first_slice.stop\n t_indice = tuple([slice(start_, end_)])\n buf = buf.__getitem__(t_indice)\n if reshape is not None:\n print(t_indice)\n try:\n buf = buf.reshape(reshape)\n except ValueError as exc:\n print(exc)\n return buf\n<|end_body_2|>\n\n<|body_start_3|>\n indice = []\n for rid in raw_indice:\n if isinstance(rid, slice):\n ri_start = context.evaluate_expr(rid.start)\n ri_step = context.evaluate_expr(rid.step)\n ri_stop = context.evaluate_expr(rid.stop)\n rid = slice(ri_start, ri_stop, ri_step)\n else:\n rid = context.evaluate_expr(rid)\n indice.append(rid)\n t_indice = tuple(indice)\n return t_indice\n<|end_body_3|>\n\n<|body_start_4|>\n np_buf = context.get_value(tensor)\n raw_indice = tensor.indice.indice\n buf = Print.get_tensor(context, np_buf, raw_indice, reshape)\n if source_info:\n print(source_info)\n print(tensor.name + '.data (id:{}):\\n'.format(id(buf)) + str(buf))\n print(tensor.name + '.shape:' + str(buf.shape) + ' dtype=' + str(buf.dtype))\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000252", "length_bytes": 42515, "license_type": "no_license", "methods": [{"docstring": "Initialize class Print Parameters ---------- tensor: a type of Tensor reshape:reshape the buffer source_info:source code information It represents the relationship of current node with source code Returns ---------- No returns", "name": "__init__", "signature": "def __init__(self, tensor, reshape, source_info)"}, {"docstring": "Eval function evaluate all of self.function Parameters ---------- context:information of debugger store all of debugger's information Returns ---------- None", "name": "eval_", "signature": "def eval_(self, context)"}, {"docstring": "Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information np_buf:NumpyBuffer raw_indice: index of raw reshape:None Returns ---------- buf:NumpyBuffer", "name": "get_tensor", "signature": "def get_tensor(context, np_buf, raw_indice, reshape=None)"}, {"docstring": "Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information raw_indice: index of raw Returns ---------- t_indice", "name": "get_t_indice", "signature": "def get_t_indice(context, raw_indice)"}, {"docstring": "To dump the given tensor Parameters --------- context:context:information of debugger store all of debugger's information tensor: a type of Tensor reshape:None source_info:None Returns ---------- No returns", "name": "dump_tensor", "signature": "def dump_tensor(context, tensor, reshape=None, source_info=None)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_049672", "prompt": "Implement the Python class `Print` described below.\n\nClass description:\nClass Print inherits from Print\n\nMethod signatures and docstrings:\n- def __init__(self, tensor, reshape, source_info): Initialize class Print Parameters ---------- tensor: a type of Tensor reshape:reshape the buffer source_info:source code information It represents the relationship of current node with source code Returns ---------- No returns\n- def eval_(self, context): Eval function evaluate all of self.function Parameters ---------- context:information of debugger store all of debugger's information Returns ---------- None\n- def get_tensor(context, np_buf, raw_indice, reshape=None): Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information np_buf:NumpyBuffer raw_indice: index of raw reshape:None Returns ---------- buf:NumpyBuffer\n- def get_t_indice(context, raw_indice): Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information raw_indice: index of raw Returns ---------- t_indice\n- def dump_tensor(context, tensor, reshape=None, source_info=None): To dump the given tensor Parameters --------- context:context:information of debugger store all of debugger's information tensor: a type of Tensor reshape:None source_info:None Returns ---------- No returns", "prompted_full_text": "Implement the Python class `Print` described below.\n\nClass description:\nClass Print inherits from Print\n\nMethod signatures and docstrings:\n- def __init__(self, tensor, reshape, source_info): Initialize class Print Parameters ---------- tensor: a type of Tensor reshape:reshape the buffer source_info:source code information It represents the relationship of current node with source code Returns ---------- No returns\n- def eval_(self, context): Eval function evaluate all of self.function Parameters ---------- context:information of debugger store all of debugger's information Returns ---------- None\n- def get_tensor(context, np_buf, raw_indice, reshape=None): Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information np_buf:NumpyBuffer raw_indice: index of raw reshape:None Returns ---------- buf:NumpyBuffer\n- def get_t_indice(context, raw_indice): Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information raw_indice: index of raw Returns ---------- t_indice\n- def dump_tensor(context, tensor, reshape=None, source_info=None): To dump the given tensor Parameters --------- context:context:information of debugger store all of debugger's information tensor: a type of Tensor reshape:None source_info:None Returns ---------- No returns\n\n<|skeleton|>\nclass Print:\n \"\"\"Class Print inherits from Print\"\"\"\n\n def __init__(self, tensor, reshape, source_info):\n \"\"\"Initialize class Print Parameters ---------- tensor: a type of Tensor reshape:reshape the buffer source_info:source code information It represents the relationship of current node with source code Returns ---------- No returns\"\"\"\n <|body_0|>\n\n def eval_(self, context):\n \"\"\"Eval function evaluate all of self.function Parameters ---------- context:information of debugger store all of debugger's information Returns ---------- None\"\"\"\n <|body_1|>\n\n def get_tensor(context, np_buf, raw_indice, reshape=None):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information np_buf:NumpyBuffer raw_indice: index of raw reshape:None Returns ---------- buf:NumpyBuffer\"\"\"\n <|body_2|>\n\n def get_t_indice(context, raw_indice):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information raw_indice: index of raw Returns ---------- t_indice\"\"\"\n <|body_3|>\n\n def dump_tensor(context, tensor, reshape=None, source_info=None):\n \"\"\"To dump the given tensor Parameters --------- context:context:information of debugger store all of debugger's information tensor: a type of Tensor reshape:None source_info:None Returns ---------- No returns\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(Print, self).__init__(source_info)\n self.tensor = tensor\n self.reshape = reshape\n<|end_body_0|>\n\n<|body_start_1|>\n if TikDebug.tik_debug:\n return\n self.dump_tensor(context, self.tensor, self.reshape, self.source_info)\n<|end_body_1|>\n\n<|body_start_2|>\n value = np_buf\n min_slice_extent = 1\n t_indice = Print.get_t_indice(context, raw_indice)\n buf = value.buffer\n if len(t_indice) != len(value.buffer.shape):\n buf = buf.reshape(-1)\n lenght = len(buf)\n first_slice = t_indice[0]\n start_ = first_slice.start\n end_ = lenght\n if first_slice.stop - first_slice.start > min_slice_extent:\n end_ = first_slice.stop\n t_indice = tuple([slice(start_, end_)])\n buf = buf.__getitem__(t_indice)\n if reshape is not None:\n print(t_indice)\n try:\n buf = buf.reshape(reshape)\n except ValueError as exc:\n print(exc)\n return buf\n<|end_body_2|>\n\n<|body_start_3|>\n indice = []\n for rid in raw_indice:\n if isinstance(rid, slice):\n ri_start = context.evaluate_expr(rid.start)\n ri_step = context.evaluate_expr(rid.step)\n ri_stop = context.evaluate_expr(rid.stop)\n rid = slice(ri_start, ri_stop, ri_step)\n else:\n rid = context.evaluate_expr(rid)\n indice.append(rid)\n t_indice = tuple(indice)\n return t_indice\n<|end_body_3|>\n\n<|body_start_4|>\n np_buf = context.get_value(tensor)\n raw_indice = tensor.indice.indice\n buf = Print.get_tensor(context, np_buf, raw_indice, reshape)\n if source_info:\n print(source_info)\n print(tensor.name + '.data (id:{}):\\n'.format(id(buf)) + str(buf))\n print(tensor.name + '.shape:' + str(buf.shape) + ' dtype=' + str(buf.dtype))\n<|end_body_4|>\n", "revision_id": "148511a31bfd195df889291946c43bb585acb546", "skeleton": "<|skeleton|>\nclass Print:\n \"\"\"Class Print inherits from Print\"\"\"\n\n def __init__(self, tensor, reshape, source_info):\n \"\"\"Initialize class Print Parameters ---------- tensor: a type of Tensor reshape:reshape the buffer source_info:source code information It represents the relationship of current node with source code Returns ---------- No returns\"\"\"\n <|body_0|>\n\n def eval_(self, context):\n \"\"\"Eval function evaluate all of self.function Parameters ---------- context:information of debugger store all of debugger's information Returns ---------- None\"\"\"\n <|body_1|>\n\n def get_tensor(context, np_buf, raw_indice, reshape=None):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information np_buf:NumpyBuffer raw_indice: index of raw reshape:None Returns ---------- buf:NumpyBuffer\"\"\"\n <|body_2|>\n\n def get_t_indice(context, raw_indice):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information raw_indice: index of raw Returns ---------- t_indice\"\"\"\n <|body_3|>\n\n def dump_tensor(context, tensor, reshape=None, source_info=None):\n \"\"\"To dump the given tensor Parameters --------- context:context:information of debugger store all of debugger's information tensor: a type of Tensor reshape:None source_info:None Returns ---------- No returns\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Print:\n \"\"\"Class Print inherits from Print\"\"\"\n\n def __init__(self, tensor, reshape, source_info):\n \"\"\"Initialize class Print Parameters ---------- tensor: a type of Tensor reshape:reshape the buffer source_info:source code information It represents the relationship of current node with source code Returns ---------- No returns\"\"\"\n super(Print, self).__init__(source_info)\n self.tensor = tensor\n self.reshape = reshape\n\n def eval_(self, context):\n \"\"\"Eval function evaluate all of self.function Parameters ---------- context:information of debugger store all of debugger's information Returns ---------- None\"\"\"\n if TikDebug.tik_debug:\n return\n self.dump_tensor(context, self.tensor, self.reshape, self.source_info)\n\n def get_tensor(context, np_buf, raw_indice, reshape=None):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information np_buf:NumpyBuffer raw_indice: index of raw reshape:None Returns ---------- buf:NumpyBuffer\"\"\"\n value = np_buf\n min_slice_extent = 1\n t_indice = Print.get_t_indice(context, raw_indice)\n buf = value.buffer\n if len(t_indice) != len(value.buffer.shape):\n buf = buf.reshape(-1)\n lenght = len(buf)\n first_slice = t_indice[0]\n start_ = first_slice.start\n end_ = lenght\n if first_slice.stop - first_slice.start > min_slice_extent:\n end_ = first_slice.stop\n t_indice = tuple([slice(start_, end_)])\n buf = buf.__getitem__(t_indice)\n if reshape is not None:\n print(t_indice)\n try:\n buf = buf.reshape(reshape)\n except ValueError as exc:\n print(exc)\n return buf\n\n def get_t_indice(context, raw_indice):\n \"\"\"Get Tensor Parameters ---------- context:context:information of debugger store all of debugger's information raw_indice: index of raw Returns ---------- t_indice\"\"\"\n indice = []\n for rid in raw_indice:\n if isinstance(rid, slice):\n ri_start = context.evaluate_expr(rid.start)\n ri_step = context.evaluate_expr(rid.step)\n ri_stop = context.evaluate_expr(rid.stop)\n rid = slice(ri_start, ri_stop, ri_step)\n else:\n rid = context.evaluate_expr(rid)\n indice.append(rid)\n t_indice = tuple(indice)\n return t_indice\n\n def dump_tensor(context, tensor, reshape=None, source_info=None):\n \"\"\"To dump the given tensor Parameters --------- context:context:information of debugger store all of debugger's information tensor: a type of Tensor reshape:None source_info:None Returns ---------- No returns\"\"\"\n np_buf = context.get_value(tensor)\n raw_indice = tensor.indice.indice\n buf = Print.get_tensor(context, np_buf, raw_indice, reshape)\n if source_info:\n print(source_info)\n print(tensor.name + '.data (id:{}):\\n'.format(id(buf)) + str(buf))\n print(tensor.name + '.shape:' + str(buf.shape) + ' dtype=' + str(buf.dtype))\n", "source": "the_stack_v2_python_sparse", "source_path": "convertor/huawei/te/tik/debug/statement.py", "source_repo": "jizhuoran/caffe-huawei-atlas-convertor", "split": "test", "star_events_count": 4} {"blob_id": "49f1de400275ef196b5c3ba57d1c4223e5de3094", "bodies": ["super(UNet2, self).__init__()\nself.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)\nself.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)\nself.down1 = down(32, 64, 5)\nself.down2 = down(64, 128, 3)\nself.down3 = down(128, 256, 3)\nself.down4 = down(256, 512, 3)\nself.down5 = down(512, 512, 3)\nself.up1 = up(512, 512)\nself.up2 = up(512, 256)\nself.up3 = up(256, 128)\nself.up4 = up(128, 64)\nself.up5 = up(64, 32)\nself.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)", "x = F.leaky_relu(self.conv1(x), negative_slope=0.1)\ns1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)\ns2 = self.down1(s1)\ns3 = self.down2(s2)\ns4 = self.down3(s3)\ns5 = self.down4(s4)\nx = self.down5(s5)\nx = self.up1(x, s5)\nx = self.up2(x, s4)\nx = self.up3(x, s3)\nx = self.up4(x, s2)\nx1 = self.up5(x, s1)\nx = F.leaky_relu(self.conv3(x1), negative_slope=0.1)\nreturn (x, x1)"], "bodies_text": "<|body_start_0|>\n super(UNet2, self).__init__()\n self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)\n self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)\n self.down1 = down(32, 64, 5)\n self.down2 = down(64, 128, 3)\n self.down3 = down(128, 256, 3)\n self.down4 = down(256, 512, 3)\n self.down5 = down(512, 512, 3)\n self.up1 = up(512, 512)\n self.up2 = up(512, 256)\n self.up3 = up(256, 128)\n self.up4 = up(128, 64)\n self.up5 = up(64, 32)\n self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = F.leaky_relu(self.conv1(x), negative_slope=0.1)\n s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)\n s2 = self.down1(s1)\n s3 = self.down2(s2)\n s4 = self.down3(s3)\n s5 = self.down4(s4)\n x = self.down5(s5)\n x = self.up1(x, s5)\n x = self.up2(x, s4)\n x = self.up3(x, s3)\n x = self.up4(x, s2)\n x1 = self.up5(x, s1)\n x = F.leaky_relu(self.conv3(x1), negative_slope=0.1)\n return (x, x1)\n<|end_body_1|>\n", "class_docstring": "A class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block.", "class_name": "UNet2", "detected_licenses": ["GPL-1.0-or-later", "Apache-2.0", "MIT", "BSD-2-Clause", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UNet2:\n \"\"\"A class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block.\"\"\"\n\n def __init__(self, inChannels, outChannels):\n \"\"\"Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(UNet2, self).__init__()\n self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)\n self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)\n self.down1 = down(32, 64, 5)\n self.down2 = down(64, 128, 3)\n self.down3 = down(128, 256, 3)\n self.down4 = down(256, 512, 3)\n self.down5 = down(512, 512, 3)\n self.up1 = up(512, 512)\n self.up2 = up(512, 256)\n self.up3 = up(256, 128)\n self.up4 = up(128, 64)\n self.up5 = up(64, 32)\n self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = F.leaky_relu(self.conv1(x), negative_slope=0.1)\n s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)\n s2 = self.down1(s1)\n s3 = self.down2(s2)\n s4 = self.down3(s3)\n s5 = self.down4(s4)\n x = self.down5(s5)\n x = self.up1(x, s5)\n x = self.up2(x, s4)\n x = self.up3(x, s3)\n x = self.up4(x, s2)\n x1 = self.up5(x, s1)\n x = F.leaky_relu(self.conv3(x1), negative_slope=0.1)\n return (x, x1)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000253", "length_bytes": 6643, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet.", "name": "__init__", "signature": "def __init__(self, inChannels, outChannels)"}, {"docstring": "Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet.", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_021579", "prompt": "Implement the Python class `UNet2` described below.\n\nClass description:\nA class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block.\n\nMethod signatures and docstrings:\n- def __init__(self, inChannels, outChannels): Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet.\n- def forward(self, x): Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet.", "prompted_full_text": "Implement the Python class `UNet2` described below.\n\nClass description:\nA class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block.\n\nMethod signatures and docstrings:\n- def __init__(self, inChannels, outChannels): Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet.\n- def forward(self, x): Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet.\n\n<|skeleton|>\nclass UNet2:\n \"\"\"A class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block.\"\"\"\n\n def __init__(self, inChannels, outChannels):\n \"\"\"Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(UNet2, self).__init__()\n self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)\n self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)\n self.down1 = down(32, 64, 5)\n self.down2 = down(64, 128, 3)\n self.down3 = down(128, 256, 3)\n self.down4 = down(256, 512, 3)\n self.down5 = down(512, 512, 3)\n self.up1 = up(512, 512)\n self.up2 = up(512, 256)\n self.up3 = up(256, 128)\n self.up4 = up(128, 64)\n self.up5 = up(64, 32)\n self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)\n<|end_body_0|>\n\n<|body_start_1|>\n x = F.leaky_relu(self.conv1(x), negative_slope=0.1)\n s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)\n s2 = self.down1(s1)\n s3 = self.down2(s2)\n s4 = self.down3(s3)\n s5 = self.down4(s4)\n x = self.down5(s5)\n x = self.up1(x, s5)\n x = self.up2(x, s4)\n x = self.up3(x, s3)\n x = self.up4(x, s2)\n x1 = self.up5(x, s1)\n x = F.leaky_relu(self.conv3(x1), negative_slope=0.1)\n return (x, x1)\n<|end_body_1|>\n", "revision_id": "92acc188d3a0f634de58463b6676e70df83ef808", "skeleton": "<|skeleton|>\nclass UNet2:\n \"\"\"A class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block.\"\"\"\n\n def __init__(self, inChannels, outChannels):\n \"\"\"Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UNet2:\n \"\"\"A class for creating UNet like architecture as specified by the Super SloMo paper. ... Methods ------- forward(x) Returns output tensor after passing input `x` to the neural network block.\"\"\"\n\n def __init__(self, inChannels, outChannels):\n \"\"\"Parameters ---------- inChannels : int number of input channels for the UNet. outChannels : int number of output channels for the UNet.\"\"\"\n super(UNet2, self).__init__()\n self.conv1 = nn.Conv2d(inChannels, 32, 7, stride=1, padding=3)\n self.conv2 = nn.Conv2d(32, 32, 7, stride=1, padding=3)\n self.down1 = down(32, 64, 5)\n self.down2 = down(64, 128, 3)\n self.down3 = down(128, 256, 3)\n self.down4 = down(256, 512, 3)\n self.down5 = down(512, 512, 3)\n self.up1 = up(512, 512)\n self.up2 = up(512, 256)\n self.up3 = up(256, 128)\n self.up4 = up(128, 64)\n self.up5 = up(64, 32)\n self.conv3 = nn.Conv2d(32, outChannels, 3, stride=1, padding=1)\n\n def forward(self, x):\n \"\"\"Returns output tensor after passing input `x` to the neural network. Parameters ---------- x : tensor input to the UNet. Returns ------- tensor output of the UNet.\"\"\"\n x = F.leaky_relu(self.conv1(x), negative_slope=0.1)\n s1 = F.leaky_relu(self.conv2(x), negative_slope=0.1)\n s2 = self.down1(s1)\n s3 = self.down2(s2)\n s4 = self.down3(s3)\n s5 = self.down4(s4)\n x = self.down5(s5)\n x = self.up1(x, s5)\n x = self.up2(x, s4)\n x = self.up3(x, s3)\n x = self.up4(x, s2)\n x1 = self.up5(x, s1)\n x = F.leaky_relu(self.conv3(x1), negative_slope=0.1)\n return (x, x1)\n", "source": "the_stack_v2_python_sparse", "source_path": "PyTorch/contrib/cv/video/QVI_ID2930_for_PyTorch/models/UNet2.py", "source_repo": "Ascend/ModelZoo-PyTorch", "split": "test", "star_events_count": 23} {"blob_id": "ea9e14053c3cbd5b90c142ceff775c96ba8707ab", "bodies": ["i = 0\nfor num in nums:\n if num != val:\n nums[i] = num\n i += 1\nreturn i", "for i in range(len(nums) - 1, -1, -1):\n if nums[i] == val:\n nums.pop(i)\nreturn len(nums)"], "bodies_text": "<|body_start_0|>\n i = 0\n for num in nums:\n if num != val:\n nums[i] = num\n i += 1\n return i\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(len(nums) - 1, -1, -1):\n if nums[i] == val:\n nums.pop(i)\n return len(nums)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def removeElement_MK1(self, nums: List[int], val: int) -> int:\n \"\"\"双指针法 Approach 1: Two Pointers\"\"\"\n <|body_0|>\n\n def removeElement_MK2(self, nums: List[int], val: int) -> int:\n \"\"\"改进的双指针法,当要删除的元素很少时,速度更快 Approach 2: Two Pointers - when elements to remove are rare\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = 0\n for num in nums:\n if num != val:\n nums[i] = num\n i += 1\n return i\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(len(nums) - 1, -1, -1):\n if nums[i] == val:\n nums.pop(i)\n return len(nums)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000254", "length_bytes": 692, "license_type": "no_license", "methods": [{"docstring": "双指针法 Approach 1: Two Pointers", "name": "removeElement_MK1", "signature": "def removeElement_MK1(self, nums: List[int], val: int) -> int"}, {"docstring": "改进的双指针法,当要删除的元素很少时,速度更快 Approach 2: Two Pointers - when elements to remove are rare", "name": "removeElement_MK2", "signature": "def removeElement_MK2(self, nums: List[int], val: int) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007685", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeElement_MK1(self, nums: List[int], val: int) -> int: 双指针法 Approach 1: Two Pointers\n- def removeElement_MK2(self, nums: List[int], val: int) -> int: 改进的双指针法,当要删除的元素很少时,速度更快 Approach 2: Two Pointers - when elements to remove are rare", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def removeElement_MK1(self, nums: List[int], val: int) -> int: 双指针法 Approach 1: Two Pointers\n- def removeElement_MK2(self, nums: List[int], val: int) -> int: 改进的双指针法,当要删除的元素很少时,速度更快 Approach 2: Two Pointers - when elements to remove are rare\n\n<|skeleton|>\nclass Solution:\n\n def removeElement_MK1(self, nums: List[int], val: int) -> int:\n \"\"\"双指针法 Approach 1: Two Pointers\"\"\"\n <|body_0|>\n\n def removeElement_MK2(self, nums: List[int], val: int) -> int:\n \"\"\"改进的双指针法,当要删除的元素很少时,速度更快 Approach 2: Two Pointers - when elements to remove are rare\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n i = 0\n for num in nums:\n if num != val:\n nums[i] = num\n i += 1\n return i\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(len(nums) - 1, -1, -1):\n if nums[i] == val:\n nums.pop(i)\n return len(nums)\n<|end_body_1|>\n", "revision_id": "d7ba416d22becfa8f2a2ae4eee04c86617cd9332", "skeleton": "<|skeleton|>\nclass Solution:\n\n def removeElement_MK1(self, nums: List[int], val: int) -> int:\n \"\"\"双指针法 Approach 1: Two Pointers\"\"\"\n <|body_0|>\n\n def removeElement_MK2(self, nums: List[int], val: int) -> int:\n \"\"\"改进的双指针法,当要删除的元素很少时,速度更快 Approach 2: Two Pointers - when elements to remove are rare\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def removeElement_MK1(self, nums: List[int], val: int) -> int:\n \"\"\"双指针法 Approach 1: Two Pointers\"\"\"\n i = 0\n for num in nums:\n if num != val:\n nums[i] = num\n i += 1\n return i\n\n def removeElement_MK2(self, nums: List[int], val: int) -> int:\n \"\"\"改进的双指针法,当要删除的元素很少时,速度更快 Approach 2: Two Pointers - when elements to remove are rare\"\"\"\n for i in range(len(nums) - 1, -1, -1):\n if nums[i] == val:\n nums.pop(i)\n return len(nums)\n", "source": "the_stack_v2_python_sparse", "source_path": "0027. Remove Element/Solution.py", "source_repo": "faterazer/LeetCode", "split": "test", "star_events_count": 4} {"blob_id": "2350d205b85fbe7316b4472a04a84ed1186f84f2", "bodies": ["mongo = main.MongoDBConnection()\nwith mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()", "mongo = main.MongoDBConnection()\nwith mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()", "directory_name = './csv_files'\nproduct_file = 'products.csv'\ncustomer_file = 'customers.csv'\nrentals_file = 'rentals.csv'\nresults = main.import_data(directory_name, product_file, customer_file, rentals_file)\ncompare = ((4, 6, 4), (0, 0, 0))\nself.assertEqual(results, compare)", "directory_name = './csv_files'\nproduct_file = 'products.csv'\ncustomer_file = 'customers.csv'\nrentals_file = 'rentals.csv'\nmain.import_data(directory_name, product_file, customer_file, rentals_file)\nresults = main.show_available_products()\ncompare = {'prd001': {'description': '65-Inch TV', 'product_type': 'livingroom', 'quantity_available': '5'}, 'prd003': {'description': 'Queen Bed', 'product_type': 'bedroom', 'quantity_available': '4'}, 'prd005': {'description': 'Dish Washer', 'product_type': 'kitchen', 'quantity_available': '3'}}\nself.assertEqual(results, compare)", "directory_name = './csv_files'\nproduct_file = 'products.csv'\ncustomer_file = 'customers.csv'\nrentals_file = 'rentals.csv'\nmain.import_data(directory_name, product_file, customer_file, rentals_file)\nresults = main.show_rentals('prd002')\ncompare = {'user004': {'user_id': 'user004', 'name': 'Luke Organa', 'address': '1235 Aldern Lane', 'phone_number': '4796268775', 'email': 'sw@storm.net'}, 'user002': {'user_id': 'user002', 'name': 'Anna Light', 'address': '568 Elder Road', 'phone_number': '5612378451', 'email': 'anna@yahoo.com'}}\nself.assertEqual(results, compare)"], "bodies_text": "<|body_start_0|>\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n<|end_body_0|>\n\n<|body_start_1|>\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n<|end_body_1|>\n\n<|body_start_2|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n results = main.import_data(directory_name, product_file, customer_file, rentals_file)\n compare = ((4, 6, 4), (0, 0, 0))\n self.assertEqual(results, compare)\n<|end_body_2|>\n\n<|body_start_3|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_available_products()\n compare = {'prd001': {'description': '65-Inch TV', 'product_type': 'livingroom', 'quantity_available': '5'}, 'prd003': {'description': 'Queen Bed', 'product_type': 'bedroom', 'quantity_available': '4'}, 'prd005': {'description': 'Dish Washer', 'product_type': 'kitchen', 'quantity_available': '3'}}\n self.assertEqual(results, compare)\n<|end_body_3|>\n\n<|body_start_4|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_rentals('prd002')\n compare = {'user004': {'user_id': 'user004', 'name': 'Luke Organa', 'address': '1235 Aldern Lane', 'phone_number': '4796268775', 'email': 'sw@storm.net'}, 'user002': {'user_id': 'user002', 'name': 'Anna Light', 'address': '568 Elder Road', 'phone_number': '5612378451', 'email': 'anna@yahoo.com'}}\n self.assertEqual(results, compare)\n<|end_body_4|>\n", "class_docstring": "Tests the functionality of the Mongo Database", "class_name": "TestDatabase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestDatabase:\n \"\"\"Tests the functionality of the Mongo Database\"\"\"\n\n def setUp(self):\n \"\"\"Setting up the database for the tests\"\"\"\n <|body_0|>\n\n def tearDown(self):\n \"\"\"Tearing down anything created or used for testing purposes\"\"\"\n <|body_1|>\n\n def test_import_data(self):\n \"\"\"Testing the import_data funtion\"\"\"\n <|body_2|>\n\n def test_show_available_products(self):\n \"\"\"Testing functionality of showing all available products in the database\"\"\"\n <|body_3|>\n\n def test_show_rentals(self):\n \"\"\"Testing the functionality of showing all users who rented a given product\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n<|end_body_0|>\n\n<|body_start_1|>\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n<|end_body_1|>\n\n<|body_start_2|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n results = main.import_data(directory_name, product_file, customer_file, rentals_file)\n compare = ((4, 6, 4), (0, 0, 0))\n self.assertEqual(results, compare)\n<|end_body_2|>\n\n<|body_start_3|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_available_products()\n compare = {'prd001': {'description': '65-Inch TV', 'product_type': 'livingroom', 'quantity_available': '5'}, 'prd003': {'description': 'Queen Bed', 'product_type': 'bedroom', 'quantity_available': '4'}, 'prd005': {'description': 'Dish Washer', 'product_type': 'kitchen', 'quantity_available': '3'}}\n self.assertEqual(results, compare)\n<|end_body_3|>\n\n<|body_start_4|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_rentals('prd002')\n compare = {'user004': {'user_id': 'user004', 'name': 'Luke Organa', 'address': '1235 Aldern Lane', 'phone_number': '4796268775', 'email': 'sw@storm.net'}, 'user002': {'user_id': 'user002', 'name': 'Anna Light', 'address': '568 Elder Road', 'phone_number': '5612378451', 'email': 'anna@yahoo.com'}}\n self.assertEqual(results, compare)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000255", "length_bytes": 3499, "license_type": "no_license", "methods": [{"docstring": "Setting up the database for the tests", "name": "setUp", "signature": "def setUp(self)"}, {"docstring": "Tearing down anything created or used for testing purposes", "name": "tearDown", "signature": "def tearDown(self)"}, {"docstring": "Testing the import_data funtion", "name": "test_import_data", "signature": "def test_import_data(self)"}, {"docstring": "Testing functionality of showing all available products in the database", "name": "test_show_available_products", "signature": "def test_show_available_products(self)"}, {"docstring": "Testing the functionality of showing all users who rented a given product", "name": "test_show_rentals", "signature": "def test_show_rentals(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_052565", "prompt": "Implement the Python class `TestDatabase` described below.\n\nClass description:\nTests the functionality of the Mongo Database\n\nMethod signatures and docstrings:\n- def setUp(self): Setting up the database for the tests\n- def tearDown(self): Tearing down anything created or used for testing purposes\n- def test_import_data(self): Testing the import_data funtion\n- def test_show_available_products(self): Testing functionality of showing all available products in the database\n- def test_show_rentals(self): Testing the functionality of showing all users who rented a given product", "prompted_full_text": "Implement the Python class `TestDatabase` described below.\n\nClass description:\nTests the functionality of the Mongo Database\n\nMethod signatures and docstrings:\n- def setUp(self): Setting up the database for the tests\n- def tearDown(self): Tearing down anything created or used for testing purposes\n- def test_import_data(self): Testing the import_data funtion\n- def test_show_available_products(self): Testing functionality of showing all available products in the database\n- def test_show_rentals(self): Testing the functionality of showing all users who rented a given product\n\n<|skeleton|>\nclass TestDatabase:\n \"\"\"Tests the functionality of the Mongo Database\"\"\"\n\n def setUp(self):\n \"\"\"Setting up the database for the tests\"\"\"\n <|body_0|>\n\n def tearDown(self):\n \"\"\"Tearing down anything created or used for testing purposes\"\"\"\n <|body_1|>\n\n def test_import_data(self):\n \"\"\"Testing the import_data funtion\"\"\"\n <|body_2|>\n\n def test_show_available_products(self):\n \"\"\"Testing functionality of showing all available products in the database\"\"\"\n <|body_3|>\n\n def test_show_rentals(self):\n \"\"\"Testing the functionality of showing all users who rented a given product\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n<|end_body_0|>\n\n<|body_start_1|>\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n<|end_body_1|>\n\n<|body_start_2|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n results = main.import_data(directory_name, product_file, customer_file, rentals_file)\n compare = ((4, 6, 4), (0, 0, 0))\n self.assertEqual(results, compare)\n<|end_body_2|>\n\n<|body_start_3|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_available_products()\n compare = {'prd001': {'description': '65-Inch TV', 'product_type': 'livingroom', 'quantity_available': '5'}, 'prd003': {'description': 'Queen Bed', 'product_type': 'bedroom', 'quantity_available': '4'}, 'prd005': {'description': 'Dish Washer', 'product_type': 'kitchen', 'quantity_available': '3'}}\n self.assertEqual(results, compare)\n<|end_body_3|>\n\n<|body_start_4|>\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_rentals('prd002')\n compare = {'user004': {'user_id': 'user004', 'name': 'Luke Organa', 'address': '1235 Aldern Lane', 'phone_number': '4796268775', 'email': 'sw@storm.net'}, 'user002': {'user_id': 'user002', 'name': 'Anna Light', 'address': '568 Elder Road', 'phone_number': '5612378451', 'email': 'anna@yahoo.com'}}\n self.assertEqual(results, compare)\n<|end_body_4|>\n", "revision_id": "5dac60f39e3909ff05b26721d602ed20f14d6be3", "skeleton": "<|skeleton|>\nclass TestDatabase:\n \"\"\"Tests the functionality of the Mongo Database\"\"\"\n\n def setUp(self):\n \"\"\"Setting up the database for the tests\"\"\"\n <|body_0|>\n\n def tearDown(self):\n \"\"\"Tearing down anything created or used for testing purposes\"\"\"\n <|body_1|>\n\n def test_import_data(self):\n \"\"\"Testing the import_data funtion\"\"\"\n <|body_2|>\n\n def test_show_available_products(self):\n \"\"\"Testing functionality of showing all available products in the database\"\"\"\n <|body_3|>\n\n def test_show_rentals(self):\n \"\"\"Testing the functionality of showing all users who rented a given product\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestDatabase:\n \"\"\"Tests the functionality of the Mongo Database\"\"\"\n\n def setUp(self):\n \"\"\"Setting up the database for the tests\"\"\"\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n\n def tearDown(self):\n \"\"\"Tearing down anything created or used for testing purposes\"\"\"\n mongo = main.MongoDBConnection()\n with mongo:\n db = mongo.connection.storeDB\n db['customers'].drop()\n db['products'].drop()\n db['rentals'].drop()\n\n def test_import_data(self):\n \"\"\"Testing the import_data funtion\"\"\"\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n results = main.import_data(directory_name, product_file, customer_file, rentals_file)\n compare = ((4, 6, 4), (0, 0, 0))\n self.assertEqual(results, compare)\n\n def test_show_available_products(self):\n \"\"\"Testing functionality of showing all available products in the database\"\"\"\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_available_products()\n compare = {'prd001': {'description': '65-Inch TV', 'product_type': 'livingroom', 'quantity_available': '5'}, 'prd003': {'description': 'Queen Bed', 'product_type': 'bedroom', 'quantity_available': '4'}, 'prd005': {'description': 'Dish Washer', 'product_type': 'kitchen', 'quantity_available': '3'}}\n self.assertEqual(results, compare)\n\n def test_show_rentals(self):\n \"\"\"Testing the functionality of showing all users who rented a given product\"\"\"\n directory_name = './csv_files'\n product_file = 'products.csv'\n customer_file = 'customers.csv'\n rentals_file = 'rentals.csv'\n main.import_data(directory_name, product_file, customer_file, rentals_file)\n results = main.show_rentals('prd002')\n compare = {'user004': {'user_id': 'user004', 'name': 'Luke Organa', 'address': '1235 Aldern Lane', 'phone_number': '4796268775', 'email': 'sw@storm.net'}, 'user002': {'user_id': 'user002', 'name': 'Anna Light', 'address': '568 Elder Road', 'phone_number': '5612378451', 'email': 'anna@yahoo.com'}}\n self.assertEqual(results, compare)\n", "source": "the_stack_v2_python_sparse", "source_path": "students/humberto_gonzalez/lesson05/test_database.py", "source_repo": "JavaRod/SP_Python220B_2019", "split": "test", "star_events_count": 1} {"blob_id": "6c5a5dd5f216cd4c48ce443cfdaca81466ae40bb", "bodies": ["from django.contrib.auth import authenticate\nlogins = [{'type': 'valid user and pass', 'user': 'rf', 'pass': 'rf'}, {'type': 'invalid pass on admin', 'user': 'admin', 'pass': 'rf'}, {'type': 'invalid pass', 'user': 'rf', 'pass': 'notthepassword'}, {'type': 'invalid user', 'user': 'newphone', 'pass': 'rf'}]\nfor login in logins:\n with self.subTest('trying {type}'.format(type=login['type'])):\n attempt = authenticate(self.client.request(), username=login['user'], password=login['pass'])\n if login is logins[0]:\n self.assertEqual(attempt, User.objects.get(username='rf'))\n else:\n self.assertEqual(attempt, None)", "backend = SPFBackend()\nself.assertIsNone(backend.get_user(9999), 'Invalid user should not exist according to backend and fixtures')\nself.assertIsNotNone(backend.get_user(1), 'Valid user should exist according to backend and fixtures')"], "bodies_text": "<|body_start_0|>\n from django.contrib.auth import authenticate\n logins = [{'type': 'valid user and pass', 'user': 'rf', 'pass': 'rf'}, {'type': 'invalid pass on admin', 'user': 'admin', 'pass': 'rf'}, {'type': 'invalid pass', 'user': 'rf', 'pass': 'notthepassword'}, {'type': 'invalid user', 'user': 'newphone', 'pass': 'rf'}]\n for login in logins:\n with self.subTest('trying {type}'.format(type=login['type'])):\n attempt = authenticate(self.client.request(), username=login['user'], password=login['pass'])\n if login is logins[0]:\n self.assertEqual(attempt, User.objects.get(username='rf'))\n else:\n self.assertEqual(attempt, None)\n<|end_body_0|>\n\n<|body_start_1|>\n backend = SPFBackend()\n self.assertIsNone(backend.get_user(9999), 'Invalid user should not exist according to backend and fixtures')\n self.assertIsNotNone(backend.get_user(1), 'Valid user should exist according to backend and fixtures')\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AuthenticationTests", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuthenticationTests:\n\n def test_various_logins(self):\n \"\"\"Validate that our custom authentication backgrounds works when logins fail as well.\"\"\"\n <|body_0|>\n\n def test_invalid_pk_to_get_user(self):\n \"\"\"Tests whether the backend successfully works, in returning a User when it should, and None when it shouldn't. Plus, it also ups our coverage a bit.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from django.contrib.auth import authenticate\n logins = [{'type': 'valid user and pass', 'user': 'rf', 'pass': 'rf'}, {'type': 'invalid pass on admin', 'user': 'admin', 'pass': 'rf'}, {'type': 'invalid pass', 'user': 'rf', 'pass': 'notthepassword'}, {'type': 'invalid user', 'user': 'newphone', 'pass': 'rf'}]\n for login in logins:\n with self.subTest('trying {type}'.format(type=login['type'])):\n attempt = authenticate(self.client.request(), username=login['user'], password=login['pass'])\n if login is logins[0]:\n self.assertEqual(attempt, User.objects.get(username='rf'))\n else:\n self.assertEqual(attempt, None)\n<|end_body_0|>\n\n<|body_start_1|>\n backend = SPFBackend()\n self.assertIsNone(backend.get_user(9999), 'Invalid user should not exist according to backend and fixtures')\n self.assertIsNotNone(backend.get_user(1), 'Valid user should exist according to backend and fixtures')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000256", "length_bytes": 1913, "license_type": "no_license", "methods": [{"docstring": "Validate that our custom authentication backgrounds works when logins fail as well.", "name": "test_various_logins", "signature": "def test_various_logins(self)"}, {"docstring": "Tests whether the backend successfully works, in returning a User when it should, and None when it shouldn't. Plus, it also ups our coverage a bit.", "name": "test_invalid_pk_to_get_user", "signature": "def test_invalid_pk_to_get_user(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028016", "prompt": "Implement the Python class `AuthenticationTests` described below.\n\nClass description:\nImplement the AuthenticationTests class.\n\nMethod signatures and docstrings:\n- def test_various_logins(self): Validate that our custom authentication backgrounds works when logins fail as well.\n- def test_invalid_pk_to_get_user(self): Tests whether the backend successfully works, in returning a User when it should, and None when it shouldn't. Plus, it also ups our coverage a bit.", "prompted_full_text": "Implement the Python class `AuthenticationTests` described below.\n\nClass description:\nImplement the AuthenticationTests class.\n\nMethod signatures and docstrings:\n- def test_various_logins(self): Validate that our custom authentication backgrounds works when logins fail as well.\n- def test_invalid_pk_to_get_user(self): Tests whether the backend successfully works, in returning a User when it should, and None when it shouldn't. Plus, it also ups our coverage a bit.\n\n<|skeleton|>\nclass AuthenticationTests:\n\n def test_various_logins(self):\n \"\"\"Validate that our custom authentication backgrounds works when logins fail as well.\"\"\"\n <|body_0|>\n\n def test_invalid_pk_to_get_user(self):\n \"\"\"Tests whether the backend successfully works, in returning a User when it should, and None when it shouldn't. Plus, it also ups our coverage a bit.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from django.contrib.auth import authenticate\n logins = [{'type': 'valid user and pass', 'user': 'rf', 'pass': 'rf'}, {'type': 'invalid pass on admin', 'user': 'admin', 'pass': 'rf'}, {'type': 'invalid pass', 'user': 'rf', 'pass': 'notthepassword'}, {'type': 'invalid user', 'user': 'newphone', 'pass': 'rf'}]\n for login in logins:\n with self.subTest('trying {type}'.format(type=login['type'])):\n attempt = authenticate(self.client.request(), username=login['user'], password=login['pass'])\n if login is logins[0]:\n self.assertEqual(attempt, User.objects.get(username='rf'))\n else:\n self.assertEqual(attempt, None)\n<|end_body_0|>\n\n<|body_start_1|>\n backend = SPFBackend()\n self.assertIsNone(backend.get_user(9999), 'Invalid user should not exist according to backend and fixtures')\n self.assertIsNotNone(backend.get_user(1), 'Valid user should exist according to backend and fixtures')\n<|end_body_1|>\n", "revision_id": "26144969002d5d36a3f6839699d2e6de001fabb8", "skeleton": "<|skeleton|>\nclass AuthenticationTests:\n\n def test_various_logins(self):\n \"\"\"Validate that our custom authentication backgrounds works when logins fail as well.\"\"\"\n <|body_0|>\n\n def test_invalid_pk_to_get_user(self):\n \"\"\"Tests whether the backend successfully works, in returning a User when it should, and None when it shouldn't. Plus, it also ups our coverage a bit.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AuthenticationTests:\n def test_various_logins(self):\n \"\"\"Validate that our custom authentication backgrounds works when logins fail as well.\"\"\"\n from django.contrib.auth import authenticate\n logins = [{'type': 'valid user and pass', 'user': 'rf', 'pass': 'rf'}, {'type': 'invalid pass on admin', 'user': 'admin', 'pass': 'rf'}, {'type': 'invalid pass', 'user': 'rf', 'pass': 'notthepassword'}, {'type': 'invalid user', 'user': 'newphone', 'pass': 'rf'}]\n for login in logins:\n with self.subTest('trying {type}'.format(type=login['type'])):\n attempt = authenticate(self.client.request(), username=login['user'], password=login['pass'])\n if login is logins[0]:\n self.assertEqual(attempt, User.objects.get(username='rf'))\n else:\n self.assertEqual(attempt, None)\n\n def test_invalid_pk_to_get_user(self):\n \"\"\"Tests whether the backend successfully works, in returning a User when it should, and None when it shouldn't. Plus, it also ups our coverage a bit.\"\"\"\n backend = SPFBackend()\n self.assertIsNone(backend.get_user(9999), 'Invalid user should not exist according to backend and fixtures')\n self.assertIsNotNone(backend.get_user(1), 'Valid user should exist according to backend and fixtures')\n", "source": "the_stack_v2_python_sparse", "source_path": "spbm/apps/accounts/tests.py", "source_repo": "SPF-UiO/spbm", "split": "test", "star_events_count": 4} {"blob_id": "531f69d2d4a18b62a984dbe77597731740cd6507", "bodies": ["if not nums:\n return 0\nacc_sum = {}\nacc_sum[0] = -1\nres = 0\ncurr_sum = 0\nfor i in range(len(nums)):\n curr_sum += nums[i]\n if curr_sum - k in acc_sum:\n res = max(res, i - acc_sum[curr_sum - k])\n if curr_sum not in acc_sum:\n acc_sum[curr_sum] = i\nreturn res", "if not nums:\n return 0\nl = len(nums)\nacc = [0 for x in range(l)]\naccMap = {}\nres = 0\naccMap[0] = -1\nfor i in range(l):\n if i == 0:\n acc[i] = nums[i]\n else:\n acc[i] = acc[i - 1] + nums[i]\n if acc[i] not in accMap:\n accMap[acc[i]] = i\nfor i in range(l):\n if acc[i] - k in accMap:\n res = max(res, i - accMap[acc[i] - k])\nreturn res"], "bodies_text": "<|body_start_0|>\n if not nums:\n return 0\n acc_sum = {}\n acc_sum[0] = -1\n res = 0\n curr_sum = 0\n for i in range(len(nums)):\n curr_sum += nums[i]\n if curr_sum - k in acc_sum:\n res = max(res, i - acc_sum[curr_sum - k])\n if curr_sum not in acc_sum:\n acc_sum[curr_sum] = i\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n l = len(nums)\n acc = [0 for x in range(l)]\n accMap = {}\n res = 0\n accMap[0] = -1\n for i in range(l):\n if i == 0:\n acc[i] = nums[i]\n else:\n acc[i] = acc[i - 1] + nums[i]\n if acc[i] not in accMap:\n accMap[acc[i]] = i\n for i in range(l):\n if acc[i] - k in accMap:\n res = max(res, i - accMap[acc[i] - k])\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxSubArrayLen_I(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def maxSubArrayLen_II(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not nums:\n return 0\n acc_sum = {}\n acc_sum[0] = -1\n res = 0\n curr_sum = 0\n for i in range(len(nums)):\n curr_sum += nums[i]\n if curr_sum - k in acc_sum:\n res = max(res, i - acc_sum[curr_sum - k])\n if curr_sum not in acc_sum:\n acc_sum[curr_sum] = i\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n l = len(nums)\n acc = [0 for x in range(l)]\n accMap = {}\n res = 0\n accMap[0] = -1\n for i in range(l):\n if i == 0:\n acc[i] = nums[i]\n else:\n acc[i] = acc[i - 1] + nums[i]\n if acc[i] not in accMap:\n accMap[acc[i]] = i\n for i in range(l):\n if acc[i] - k in accMap:\n res = max(res, i - accMap[acc[i] - k])\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000257", "length_bytes": 2108, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type k: int :rtype: int", "name": "maxSubArrayLen_I", "signature": "def maxSubArrayLen_I(self, nums, k)"}, {"docstring": ":type nums: List[int] :type k: int :rtype: int", "name": "maxSubArrayLen_II", "signature": "def maxSubArrayLen_II(self, nums, k)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSubArrayLen_I(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n- def maxSubArrayLen_II(self, nums, k): :type nums: List[int] :type k: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSubArrayLen_I(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n- def maxSubArrayLen_II(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def maxSubArrayLen_I(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def maxSubArrayLen_II(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not nums:\n return 0\n acc_sum = {}\n acc_sum[0] = -1\n res = 0\n curr_sum = 0\n for i in range(len(nums)):\n curr_sum += nums[i]\n if curr_sum - k in acc_sum:\n res = max(res, i - acc_sum[curr_sum - k])\n if curr_sum not in acc_sum:\n acc_sum[curr_sum] = i\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n if not nums:\n return 0\n l = len(nums)\n acc = [0 for x in range(l)]\n accMap = {}\n res = 0\n accMap[0] = -1\n for i in range(l):\n if i == 0:\n acc[i] = nums[i]\n else:\n acc[i] = acc[i - 1] + nums[i]\n if acc[i] not in accMap:\n accMap[acc[i]] = i\n for i in range(l):\n if acc[i] - k in accMap:\n res = max(res, i - accMap[acc[i] - k])\n return res\n<|end_body_1|>\n", "revision_id": "1a3c1f4d6e9d3444039f087763b93241f4ba7892", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxSubArrayLen_I(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def maxSubArrayLen_II(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def maxSubArrayLen_I(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n if not nums:\n return 0\n acc_sum = {}\n acc_sum[0] = -1\n res = 0\n curr_sum = 0\n for i in range(len(nums)):\n curr_sum += nums[i]\n if curr_sum - k in acc_sum:\n res = max(res, i - acc_sum[curr_sum - k])\n if curr_sum not in acc_sum:\n acc_sum[curr_sum] = i\n return res\n\n def maxSubArrayLen_II(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n if not nums:\n return 0\n l = len(nums)\n acc = [0 for x in range(l)]\n accMap = {}\n res = 0\n accMap[0] = -1\n for i in range(l):\n if i == 0:\n acc[i] = nums[i]\n else:\n acc[i] = acc[i - 1] + nums[i]\n if acc[i] not in accMap:\n accMap[acc[i]] = i\n for i in range(l):\n if acc[i] - k in accMap:\n res = max(res, i - accMap[acc[i] - k])\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "Algorithm/325_Maximum_Size_Subarray_Sum_Equals_k.py", "source_repo": "Gi1ia/TechNoteBook", "split": "test", "star_events_count": 7} {"blob_id": "f9708acaf0f62ab1458ff85c7742fad625c55726", "bodies": ["cursor = connection.cursor()\ncursor.execute('\\n INSERT INTO \"coupon_couponaction\" (\"action_id\", \"coupon_id\",\\n \"count\")\\n SELECT %(action_id)s, \"id\", 0\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_couponaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );\\n\\n UPDATE \"coupon_couponaction\"\\n SET \"count\" = \"count\" + 1\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s;', {'action_id': action_id, 'coupon_id': coupon_id})\nif consumer_id:\n cursor.execute('\\n INSERT INTO \"coupon_consumeraction\" (\\n \"action_id\", \"coupon_id\", \"consumer_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(consumer_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_consumeraction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"consumer_id\" = %(consumer_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'consumer_id': consumer_id})\nif subscriber_id:\n cursor.execute('\\n INSERT INTO \"coupon_subscriberaction\" (\\n \"action_id\", \"coupon_id\", \"subscriber_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(subscriber_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_subscriberaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"subscriber_id\" = %(subscriber_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'subscriber_id': subscriber_id})\ntry:\n transaction.commit()\nexcept IntegrityError:\n transaction.rollback()", "self.do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id)\nif action_id in [3, 7]:\n rank_date_time, created = RankDateTime.objects.get_or_create(coupon_id=coupon_id)\n if not created:\n rank_date_time.save()\nreturn"], "bodies_text": "<|body_start_0|>\n cursor = connection.cursor()\n cursor.execute('\\n INSERT INTO \"coupon_couponaction\" (\"action_id\", \"coupon_id\",\\n \"count\")\\n SELECT %(action_id)s, \"id\", 0\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_couponaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );\\n\\n UPDATE \"coupon_couponaction\"\\n SET \"count\" = \"count\" + 1\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s;', {'action_id': action_id, 'coupon_id': coupon_id})\n if consumer_id:\n cursor.execute('\\n INSERT INTO \"coupon_consumeraction\" (\\n \"action_id\", \"coupon_id\", \"consumer_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(consumer_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_consumeraction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"consumer_id\" = %(consumer_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'consumer_id': consumer_id})\n if subscriber_id:\n cursor.execute('\\n INSERT INTO \"coupon_subscriberaction\" (\\n \"action_id\", \"coupon_id\", \"subscriber_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(subscriber_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_subscriberaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"subscriber_id\" = %(subscriber_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'subscriber_id': subscriber_id})\n try:\n transaction.commit()\n except IntegrityError:\n transaction.rollback()\n<|end_body_0|>\n\n<|body_start_1|>\n self.do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id)\n if action_id in [3, 7]:\n rank_date_time, created = RankDateTime.objects.get_or_create(coupon_id=coupon_id)\n if not created:\n rank_date_time.save()\n return\n<|end_body_1|>\n", "class_docstring": "Task class for recording an action for a coupon.", "class_name": "RecordAction", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecordAction:\n \"\"\"Task class for recording an action for a coupon.\"\"\"\n\n def do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id):\n \"\"\"Perform raw sql then commit or rollback.\"\"\"\n <|body_0|>\n\n def run(self, action_id, coupon_id, consumer_id=None, subscriber_id=None):\n \"\"\"Creates or increments a coupon action. If consumer, creates or increments a consumer action. If subscriber, creates or increments a subscriber action.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cursor = connection.cursor()\n cursor.execute('\\n INSERT INTO \"coupon_couponaction\" (\"action_id\", \"coupon_id\",\\n \"count\")\\n SELECT %(action_id)s, \"id\", 0\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_couponaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );\\n\\n UPDATE \"coupon_couponaction\"\\n SET \"count\" = \"count\" + 1\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s;', {'action_id': action_id, 'coupon_id': coupon_id})\n if consumer_id:\n cursor.execute('\\n INSERT INTO \"coupon_consumeraction\" (\\n \"action_id\", \"coupon_id\", \"consumer_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(consumer_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_consumeraction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"consumer_id\" = %(consumer_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'consumer_id': consumer_id})\n if subscriber_id:\n cursor.execute('\\n INSERT INTO \"coupon_subscriberaction\" (\\n \"action_id\", \"coupon_id\", \"subscriber_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(subscriber_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_subscriberaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"subscriber_id\" = %(subscriber_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'subscriber_id': subscriber_id})\n try:\n transaction.commit()\n except IntegrityError:\n transaction.rollback()\n<|end_body_0|>\n\n<|body_start_1|>\n self.do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id)\n if action_id in [3, 7]:\n rank_date_time, created = RankDateTime.objects.get_or_create(coupon_id=coupon_id)\n if not created:\n rank_date_time.save()\n return\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000258", "length_bytes": 20538, "license_type": "no_license", "methods": [{"docstring": "Perform raw sql then commit or rollback.", "name": "do_raw_sql", "signature": "def do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id)"}, {"docstring": "Creates or increments a coupon action. If consumer, creates or increments a consumer action. If subscriber, creates or increments a subscriber action.", "name": "run", "signature": "def run(self, action_id, coupon_id, consumer_id=None, subscriber_id=None)"}], "n_methods": 2, "prompt": "Implement the Python class `RecordAction` described below.\n\nClass description:\nTask class for recording an action for a coupon.\n\nMethod signatures and docstrings:\n- def do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id): Perform raw sql then commit or rollback.\n- def run(self, action_id, coupon_id, consumer_id=None, subscriber_id=None): Creates or increments a coupon action. If consumer, creates or increments a consumer action. If subscriber, creates or increments a subscriber action.", "prompted_full_text": "Implement the Python class `RecordAction` described below.\n\nClass description:\nTask class for recording an action for a coupon.\n\nMethod signatures and docstrings:\n- def do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id): Perform raw sql then commit or rollback.\n- def run(self, action_id, coupon_id, consumer_id=None, subscriber_id=None): Creates or increments a coupon action. If consumer, creates or increments a consumer action. If subscriber, creates or increments a subscriber action.\n\n<|skeleton|>\nclass RecordAction:\n \"\"\"Task class for recording an action for a coupon.\"\"\"\n\n def do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id):\n \"\"\"Perform raw sql then commit or rollback.\"\"\"\n <|body_0|>\n\n def run(self, action_id, coupon_id, consumer_id=None, subscriber_id=None):\n \"\"\"Creates or increments a coupon action. If consumer, creates or increments a consumer action. If subscriber, creates or increments a subscriber action.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cursor = connection.cursor()\n cursor.execute('\\n INSERT INTO \"coupon_couponaction\" (\"action_id\", \"coupon_id\",\\n \"count\")\\n SELECT %(action_id)s, \"id\", 0\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_couponaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );\\n\\n UPDATE \"coupon_couponaction\"\\n SET \"count\" = \"count\" + 1\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s;', {'action_id': action_id, 'coupon_id': coupon_id})\n if consumer_id:\n cursor.execute('\\n INSERT INTO \"coupon_consumeraction\" (\\n \"action_id\", \"coupon_id\", \"consumer_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(consumer_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_consumeraction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"consumer_id\" = %(consumer_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'consumer_id': consumer_id})\n if subscriber_id:\n cursor.execute('\\n INSERT INTO \"coupon_subscriberaction\" (\\n \"action_id\", \"coupon_id\", \"subscriber_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(subscriber_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_subscriberaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"subscriber_id\" = %(subscriber_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'subscriber_id': subscriber_id})\n try:\n transaction.commit()\n except IntegrityError:\n transaction.rollback()\n<|end_body_0|>\n\n<|body_start_1|>\n self.do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id)\n if action_id in [3, 7]:\n rank_date_time, created = RankDateTime.objects.get_or_create(coupon_id=coupon_id)\n if not created:\n rank_date_time.save()\n return\n<|end_body_1|>\n", "revision_id": "a780ccdc3350d4b5c7990c65d1af8d71060c62cc", "skeleton": "<|skeleton|>\nclass RecordAction:\n \"\"\"Task class for recording an action for a coupon.\"\"\"\n\n def do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id):\n \"\"\"Perform raw sql then commit or rollback.\"\"\"\n <|body_0|>\n\n def run(self, action_id, coupon_id, consumer_id=None, subscriber_id=None):\n \"\"\"Creates or increments a coupon action. If consumer, creates or increments a consumer action. If subscriber, creates or increments a subscriber action.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RecordAction:\n \"\"\"Task class for recording an action for a coupon.\"\"\"\n\n def do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id):\n \"\"\"Perform raw sql then commit or rollback.\"\"\"\n cursor = connection.cursor()\n cursor.execute('\\n INSERT INTO \"coupon_couponaction\" (\"action_id\", \"coupon_id\",\\n \"count\")\\n SELECT %(action_id)s, \"id\", 0\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_couponaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );\\n\\n UPDATE \"coupon_couponaction\"\\n SET \"count\" = \"count\" + 1\\n WHERE \"action_id\" = %(action_id)s\\n AND \"coupon_id\" = %(coupon_id)s;', {'action_id': action_id, 'coupon_id': coupon_id})\n if consumer_id:\n cursor.execute('\\n INSERT INTO \"coupon_consumeraction\" (\\n \"action_id\", \"coupon_id\", \"consumer_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(consumer_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_consumeraction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"consumer_id\" = %(consumer_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'consumer_id': consumer_id})\n if subscriber_id:\n cursor.execute('\\n INSERT INTO \"coupon_subscriberaction\" (\\n \"action_id\", \"coupon_id\", \"subscriber_id\", \"create_datetime\")\\n SELECT %(action_id)s, \"id\", %(subscriber_id)s, now()\\n FROM \"coupon_coupon\"\\n WHERE \"id\" = %(coupon_id)s\\n AND \"id\" NOT IN (\\n SELECT \"coupon_id\"\\n FROM \"coupon_subscriberaction\"\\n WHERE \"action_id\" = %(action_id)s\\n AND \"subscriber_id\" = %(subscriber_id)s\\n AND \"coupon_id\" = %(coupon_id)s\\n );', {'action_id': action_id, 'coupon_id': coupon_id, 'subscriber_id': subscriber_id})\n try:\n transaction.commit()\n except IntegrityError:\n transaction.rollback()\n\n def run(self, action_id, coupon_id, consumer_id=None, subscriber_id=None):\n \"\"\"Creates or increments a coupon action. If consumer, creates or increments a consumer action. If subscriber, creates or increments a subscriber action.\"\"\"\n self.do_raw_sql(action_id, coupon_id, consumer_id, subscriber_id)\n if action_id in [3, 7]:\n rank_date_time, created = RankDateTime.objects.get_or_create(coupon_id=coupon_id)\n if not created:\n rank_date_time.save()\n return\n", "source": "the_stack_v2_python_sparse", "source_path": "coupon/tasks.py", "source_repo": "wcirillo/ten", "split": "test", "star_events_count": 0} {"blob_id": "99090aa4fad4c9ba36cc4895c2779a3f4498a7c4", "bodies": ["user = getattr(request._request, 'user', None)\nif not user or not user.is_active:\n return None\nself.enforce_csrf(request)\nreturn (user, None)", "reason = CSRFCheck().process_view(request, None, (), {})\nif reason:\n raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)"], "bodies_text": "<|body_start_0|>\n user = getattr(request._request, 'user', None)\n if not user or not user.is_active:\n return None\n self.enforce_csrf(request)\n return (user, None)\n<|end_body_0|>\n\n<|body_start_1|>\n reason = CSRFCheck().process_view(request, None, (), {})\n if reason:\n raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)\n<|end_body_1|>\n", "class_docstring": "Use Django's session framework for authentication.", "class_name": "SessionAuthentication", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SessionAuthentication:\n \"\"\"Use Django's session framework for authentication.\"\"\"\n\n def authenticate(self, request):\n \"\"\"Returns a `User` if the request session currently has a logged in user. Otherwise returns `None`.\"\"\"\n <|body_0|>\n\n def enforce_csrf(self, request):\n \"\"\"Enforce CSRF validation for session based authentication.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = getattr(request._request, 'user', None)\n if not user or not user.is_active:\n return None\n self.enforce_csrf(request)\n return (user, None)\n<|end_body_0|>\n\n<|body_start_1|>\n reason = CSRFCheck().process_view(request, None, (), {})\n if reason:\n raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000259", "length_bytes": 8216, "license_type": "permissive", "methods": [{"docstring": "Returns a `User` if the request session currently has a logged in user. Otherwise returns `None`.", "name": "authenticate", "signature": "def authenticate(self, request)"}, {"docstring": "Enforce CSRF validation for session based authentication.", "name": "enforce_csrf", "signature": "def enforce_csrf(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053269", "prompt": "Implement the Python class `SessionAuthentication` described below.\n\nClass description:\nUse Django's session framework for authentication.\n\nMethod signatures and docstrings:\n- def authenticate(self, request): Returns a `User` if the request session currently has a logged in user. Otherwise returns `None`.\n- def enforce_csrf(self, request): Enforce CSRF validation for session based authentication.", "prompted_full_text": "Implement the Python class `SessionAuthentication` described below.\n\nClass description:\nUse Django's session framework for authentication.\n\nMethod signatures and docstrings:\n- def authenticate(self, request): Returns a `User` if the request session currently has a logged in user. Otherwise returns `None`.\n- def enforce_csrf(self, request): Enforce CSRF validation for session based authentication.\n\n<|skeleton|>\nclass SessionAuthentication:\n \"\"\"Use Django's session framework for authentication.\"\"\"\n\n def authenticate(self, request):\n \"\"\"Returns a `User` if the request session currently has a logged in user. Otherwise returns `None`.\"\"\"\n <|body_0|>\n\n def enforce_csrf(self, request):\n \"\"\"Enforce CSRF validation for session based authentication.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = getattr(request._request, 'user', None)\n if not user or not user.is_active:\n return None\n self.enforce_csrf(request)\n return (user, None)\n<|end_body_0|>\n\n<|body_start_1|>\n reason = CSRFCheck().process_view(request, None, (), {})\n if reason:\n raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)\n<|end_body_1|>\n", "revision_id": "7e3dedddbe821283d909393f333eed4acd452953", "skeleton": "<|skeleton|>\nclass SessionAuthentication:\n \"\"\"Use Django's session framework for authentication.\"\"\"\n\n def authenticate(self, request):\n \"\"\"Returns a `User` if the request session currently has a logged in user. Otherwise returns `None`.\"\"\"\n <|body_0|>\n\n def enforce_csrf(self, request):\n \"\"\"Enforce CSRF validation for session based authentication.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SessionAuthentication:\n \"\"\"Use Django's session framework for authentication.\"\"\"\n\n def authenticate(self, request):\n \"\"\"Returns a `User` if the request session currently has a logged in user. Otherwise returns `None`.\"\"\"\n user = getattr(request._request, 'user', None)\n if not user or not user.is_active:\n return None\n self.enforce_csrf(request)\n return (user, None)\n\n def enforce_csrf(self, request):\n \"\"\"Enforce CSRF validation for session based authentication.\"\"\"\n reason = CSRFCheck().process_view(request, None, (), {})\n if reason:\n raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)\n", "source": "the_stack_v2_python_sparse", "source_path": "api/authentication.py", "source_repo": "erigones/esdc-ce", "split": "test", "star_events_count": 123} {"blob_id": "f324d0f7e8fbd7f06ca93dee64fad3919778e466", "bodies": ["self.capacity = capacity\nself.cache = dict()\nself.count = defaultdict(int)\nself.freq = defaultdict(list)", "if not key in self.cache:\n return -1\n_freq = self.count[key]\nself.count[key] = _freq + 1\nself.freq[_freq].remove(key)\nif not self.freq[_freq]:\n self.freq.pop(_freq)\nself.freq[_freq + 1].append(key)\nreturn self.cache[key]", "if self.capacity == 0:\n return\nif key not in self.cache:\n if len(self.cache) >= self.capacity:\n min_freq = min(self.freq.keys())\n key_to_remove = self.freq[min_freq].pop(0)\n if not self.freq[min_freq]:\n self.freq.pop(min_freq)\n self.cache.pop(key_to_remove)\n self.count.pop(key_to_remove)\n_freq = self.count[key]\nif self.freq[_freq]:\n self.freq[_freq].remove(key)\nif not self.freq[_freq]:\n self.freq.pop(_freq)\nself.count[key] = _freq + 1\nself.cache[key] = value\nself.freq[_freq + 1].append(key)"], "bodies_text": "<|body_start_0|>\n self.capacity = capacity\n self.cache = dict()\n self.count = defaultdict(int)\n self.freq = defaultdict(list)\n<|end_body_0|>\n\n<|body_start_1|>\n if not key in self.cache:\n return -1\n _freq = self.count[key]\n self.count[key] = _freq + 1\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.freq[_freq + 1].append(key)\n return self.cache[key]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.capacity == 0:\n return\n if key not in self.cache:\n if len(self.cache) >= self.capacity:\n min_freq = min(self.freq.keys())\n key_to_remove = self.freq[min_freq].pop(0)\n if not self.freq[min_freq]:\n self.freq.pop(min_freq)\n self.cache.pop(key_to_remove)\n self.count.pop(key_to_remove)\n _freq = self.count[key]\n if self.freq[_freq]:\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.count[key] = _freq + 1\n self.cache[key] = value\n self.freq[_freq + 1].append(key)\n<|end_body_2|>\n", "class_docstring": "Class which implement LFU cache algorithm", "class_name": "LFUCache", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LFUCache:\n \"\"\"Class which implement LFU cache algorithm\"\"\"\n\n def __init__(self, capacity):\n \"\"\":type capacity: int :rtype: None\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: str :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: nothing\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capacity = capacity\n self.cache = dict()\n self.count = defaultdict(int)\n self.freq = defaultdict(list)\n<|end_body_0|>\n\n<|body_start_1|>\n if not key in self.cache:\n return -1\n _freq = self.count[key]\n self.count[key] = _freq + 1\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.freq[_freq + 1].append(key)\n return self.cache[key]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.capacity == 0:\n return\n if key not in self.cache:\n if len(self.cache) >= self.capacity:\n min_freq = min(self.freq.keys())\n key_to_remove = self.freq[min_freq].pop(0)\n if not self.freq[min_freq]:\n self.freq.pop(min_freq)\n self.cache.pop(key_to_remove)\n self.count.pop(key_to_remove)\n _freq = self.count[key]\n if self.freq[_freq]:\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.count[key] = _freq + 1\n self.cache[key] = value\n self.freq[_freq + 1].append(key)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000260", "length_bytes": 7259, "license_type": "no_license", "methods": [{"docstring": ":type capacity: int :rtype: None", "name": "__init__", "signature": "def __init__(self, capacity)"}, {"docstring": ":type key: str :rtype: int", "name": "get", "signature": "def get(self, key)"}, {"docstring": ":type key: int :type value: int :rtype: nothing", "name": "put", "signature": "def put(self, key, value)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_021798", "prompt": "Implement the Python class `LFUCache` described below.\n\nClass description:\nClass which implement LFU cache algorithm\n\nMethod signatures and docstrings:\n- def __init__(self, capacity): :type capacity: int :rtype: None\n- def get(self, key): :type key: str :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: nothing", "prompted_full_text": "Implement the Python class `LFUCache` described below.\n\nClass description:\nClass which implement LFU cache algorithm\n\nMethod signatures and docstrings:\n- def __init__(self, capacity): :type capacity: int :rtype: None\n- def get(self, key): :type key: str :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: nothing\n\n<|skeleton|>\nclass LFUCache:\n \"\"\"Class which implement LFU cache algorithm\"\"\"\n\n def __init__(self, capacity):\n \"\"\":type capacity: int :rtype: None\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: str :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: nothing\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.capacity = capacity\n self.cache = dict()\n self.count = defaultdict(int)\n self.freq = defaultdict(list)\n<|end_body_0|>\n\n<|body_start_1|>\n if not key in self.cache:\n return -1\n _freq = self.count[key]\n self.count[key] = _freq + 1\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.freq[_freq + 1].append(key)\n return self.cache[key]\n<|end_body_1|>\n\n<|body_start_2|>\n if self.capacity == 0:\n return\n if key not in self.cache:\n if len(self.cache) >= self.capacity:\n min_freq = min(self.freq.keys())\n key_to_remove = self.freq[min_freq].pop(0)\n if not self.freq[min_freq]:\n self.freq.pop(min_freq)\n self.cache.pop(key_to_remove)\n self.count.pop(key_to_remove)\n _freq = self.count[key]\n if self.freq[_freq]:\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.count[key] = _freq + 1\n self.cache[key] = value\n self.freq[_freq + 1].append(key)\n<|end_body_2|>\n", "revision_id": "546cbce06fcd4bc34e16d42b5d5eb68fb25e16a9", "skeleton": "<|skeleton|>\nclass LFUCache:\n \"\"\"Class which implement LFU cache algorithm\"\"\"\n\n def __init__(self, capacity):\n \"\"\":type capacity: int :rtype: None\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: str :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: nothing\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LFUCache:\n \"\"\"Class which implement LFU cache algorithm\"\"\"\n\n def __init__(self, capacity):\n \"\"\":type capacity: int :rtype: None\"\"\"\n self.capacity = capacity\n self.cache = dict()\n self.count = defaultdict(int)\n self.freq = defaultdict(list)\n\n def get(self, key):\n \"\"\":type key: str :rtype: int\"\"\"\n if not key in self.cache:\n return -1\n _freq = self.count[key]\n self.count[key] = _freq + 1\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.freq[_freq + 1].append(key)\n return self.cache[key]\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: nothing\"\"\"\n if self.capacity == 0:\n return\n if key not in self.cache:\n if len(self.cache) >= self.capacity:\n min_freq = min(self.freq.keys())\n key_to_remove = self.freq[min_freq].pop(0)\n if not self.freq[min_freq]:\n self.freq.pop(min_freq)\n self.cache.pop(key_to_remove)\n self.count.pop(key_to_remove)\n _freq = self.count[key]\n if self.freq[_freq]:\n self.freq[_freq].remove(key)\n if not self.freq[_freq]:\n self.freq.pop(_freq)\n self.count[key] = _freq + 1\n self.cache[key] = value\n self.freq[_freq + 1].append(key)\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/solution_460.py", "source_repo": "eselyavka/python", "split": "test", "star_events_count": 0} {"blob_id": "c8fb797676618daac7f74c30a93cf673a5b83b0a", "bodies": ["super().__init__(weight, batch_axis, **kwargs)\nself.quantiles = quantiles\nself.num_quantiles = len(quantiles)\nself.is_equal_weights = is_equal_weights\nself.quantile_weights = quantile_weights if quantile_weights else self.compute_quantile_weights()", "if self.num_quantiles > 1:\n y_pred_all = F.split(y_pred, axis=-1, num_outputs=self.num_quantiles, squeeze_axis=1)\nelse:\n y_pred_all = [F.squeeze(y_pred, axis=-1)]\nqt_loss = []\nfor i, y_pred_q in enumerate(y_pred_all):\n q = self.quantiles[i]\n weighted_qt = self.compute_quantile_loss(F, y_true, y_pred_q, q) * self.quantile_weights[i]\n qt_loss.append(weighted_qt)\nstacked_qt_losses = F.stack(*qt_loss, axis=-1)\nsum_qt_loss = F.mean(stacked_qt_losses, axis=-1)\nif sample_weight is not None:\n return sample_weight * sum_qt_loss\nelse:\n return sum_qt_loss", "under_bias = p * F.maximum(y_true - y_pred_p, 0)\nover_bias = (1 - p) * F.maximum(y_pred_p - y_true, 0)\nqt_loss = 2 * (under_bias + over_bias)\nreturn qt_loss", "assert self.num_quantiles >= 0, f'invalid num_quantiles: {self.num_quantiles}'\nif self.num_quantiles == 0:\n quantile_weights = []\nelif self.is_equal_weights or self.num_quantiles == 1:\n quantile_weights = [1.0 / self.num_quantiles] * self.num_quantiles\nelse:\n quantile_weights = [0.5 * (self.quantiles[1] - self.quantiles[0])] + [0.5 * (self.quantiles[i + 1] - self.quantiles[i - 1]) for i in range(1, self.num_quantiles - 1)] + [0.5 * (self.quantiles[-1] - self.quantiles[-2])]\nreturn quantile_weights"], "bodies_text": "<|body_start_0|>\n super().__init__(weight, batch_axis, **kwargs)\n self.quantiles = quantiles\n self.num_quantiles = len(quantiles)\n self.is_equal_weights = is_equal_weights\n self.quantile_weights = quantile_weights if quantile_weights else self.compute_quantile_weights()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.num_quantiles > 1:\n y_pred_all = F.split(y_pred, axis=-1, num_outputs=self.num_quantiles, squeeze_axis=1)\n else:\n y_pred_all = [F.squeeze(y_pred, axis=-1)]\n qt_loss = []\n for i, y_pred_q in enumerate(y_pred_all):\n q = self.quantiles[i]\n weighted_qt = self.compute_quantile_loss(F, y_true, y_pred_q, q) * self.quantile_weights[i]\n qt_loss.append(weighted_qt)\n stacked_qt_losses = F.stack(*qt_loss, axis=-1)\n sum_qt_loss = F.mean(stacked_qt_losses, axis=-1)\n if sample_weight is not None:\n return sample_weight * sum_qt_loss\n else:\n return sum_qt_loss\n<|end_body_1|>\n\n<|body_start_2|>\n under_bias = p * F.maximum(y_true - y_pred_p, 0)\n over_bias = (1 - p) * F.maximum(y_pred_p - y_true, 0)\n qt_loss = 2 * (under_bias + over_bias)\n return qt_loss\n<|end_body_2|>\n\n<|body_start_3|>\n assert self.num_quantiles >= 0, f'invalid num_quantiles: {self.num_quantiles}'\n if self.num_quantiles == 0:\n quantile_weights = []\n elif self.is_equal_weights or self.num_quantiles == 1:\n quantile_weights = [1.0 / self.num_quantiles] * self.num_quantiles\n else:\n quantile_weights = [0.5 * (self.quantiles[1] - self.quantiles[0])] + [0.5 * (self.quantiles[i + 1] - self.quantiles[i - 1]) for i in range(1, self.num_quantiles - 1)] + [0.5 * (self.quantiles[-1] - self.quantiles[-2])]\n return quantile_weights\n<|end_body_3|>\n", "class_docstring": "", "class_name": "QuantileLoss", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QuantileLoss:\n\n def __init__(self, quantiles: List[float], quantile_weights: Optional[List[float]]=None, is_equal_weights: bool=True, weight: Optional[float]=None, batch_axis: int=0, **kwargs) -> None:\n \"\"\"Represents the quantile loss used to fit decoders that learn quantiles. Parameters ---------- quantiles list of quantiles to compute loss over. quantile_weights weights of the quantiles. is_equal_weights use equally quantiles weights or not weight weighting of the loss. batch_axis indicates axis that represents the batch.\"\"\"\n <|body_0|>\n\n def hybrid_forward(self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None):\n \"\"\"Compute the weighted sum of quantile losses. Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)) y_pred predicted target, shape (N1 x N2 x ... x Nk x num_quantiles) sample_weight sample weights Returns ------- Tensor weighted sum of the quantile losses, shape N1 x N1 x ... Nk\"\"\"\n <|body_1|>\n\n def compute_quantile_loss(F, y_true: Tensor, y_pred_p: Tensor, p: float) -> Tensor:\n \"\"\"Compute the quantile loss of the given quantile Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)). y_pred_p predicted target quantile, shape (N1 x N2 x ... x Nk x 1). p quantile error to compute the loss. Returns ------- Tensor quantile loss, shape: (N1 x N2 x ... x Nk x 1)\"\"\"\n <|body_2|>\n\n def compute_quantile_weights(self) -> List:\n \"\"\"Compute the exact weights of the approximated integral CRPS = sum_{i=0}^{n-1} 0.5 * (q_{i+1} - q_{i}) * (z_{i+1} + z_{i}) under the assumption of linear interpolation or SQF, where z_i is the ith quantile prediction q_i. The inner terms cancel due to the telescoping sum property and we obtain CRPS = sum_{i=1}^n w_i z_i, with the weights w_i = (q_{i+1}-q_{i-1})/2 for i = 1, ..., n-1, w_0 = (q_1-q_0)/2 and w_n = (w_n - w_{n-1})/2. Returns ------- List weights of the quantiles.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(weight, batch_axis, **kwargs)\n self.quantiles = quantiles\n self.num_quantiles = len(quantiles)\n self.is_equal_weights = is_equal_weights\n self.quantile_weights = quantile_weights if quantile_weights else self.compute_quantile_weights()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.num_quantiles > 1:\n y_pred_all = F.split(y_pred, axis=-1, num_outputs=self.num_quantiles, squeeze_axis=1)\n else:\n y_pred_all = [F.squeeze(y_pred, axis=-1)]\n qt_loss = []\n for i, y_pred_q in enumerate(y_pred_all):\n q = self.quantiles[i]\n weighted_qt = self.compute_quantile_loss(F, y_true, y_pred_q, q) * self.quantile_weights[i]\n qt_loss.append(weighted_qt)\n stacked_qt_losses = F.stack(*qt_loss, axis=-1)\n sum_qt_loss = F.mean(stacked_qt_losses, axis=-1)\n if sample_weight is not None:\n return sample_weight * sum_qt_loss\n else:\n return sum_qt_loss\n<|end_body_1|>\n\n<|body_start_2|>\n under_bias = p * F.maximum(y_true - y_pred_p, 0)\n over_bias = (1 - p) * F.maximum(y_pred_p - y_true, 0)\n qt_loss = 2 * (under_bias + over_bias)\n return qt_loss\n<|end_body_2|>\n\n<|body_start_3|>\n assert self.num_quantiles >= 0, f'invalid num_quantiles: {self.num_quantiles}'\n if self.num_quantiles == 0:\n quantile_weights = []\n elif self.is_equal_weights or self.num_quantiles == 1:\n quantile_weights = [1.0 / self.num_quantiles] * self.num_quantiles\n else:\n quantile_weights = [0.5 * (self.quantiles[1] - self.quantiles[0])] + [0.5 * (self.quantiles[i + 1] - self.quantiles[i - 1]) for i in range(1, self.num_quantiles - 1)] + [0.5 * (self.quantiles[-1] - self.quantiles[-2])]\n return quantile_weights\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000261", "length_bytes": 9072, "license_type": "permissive", "methods": [{"docstring": "Represents the quantile loss used to fit decoders that learn quantiles. Parameters ---------- quantiles list of quantiles to compute loss over. quantile_weights weights of the quantiles. is_equal_weights use equally quantiles weights or not weight weighting of the loss. batch_axis indicates axis that represents the batch.", "name": "__init__", "signature": "def __init__(self, quantiles: List[float], quantile_weights: Optional[List[float]]=None, is_equal_weights: bool=True, weight: Optional[float]=None, batch_axis: int=0, **kwargs) -> None"}, {"docstring": "Compute the weighted sum of quantile losses. Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)) y_pred predicted target, shape (N1 x N2 x ... x Nk x num_quantiles) sample_weight sample weights Returns ------- Tensor weighted sum of the quantile losses, shape N1 x N1 x ... Nk", "name": "hybrid_forward", "signature": "def hybrid_forward(self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None)"}, {"docstring": "Compute the quantile loss of the given quantile Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)). y_pred_p predicted target quantile, shape (N1 x N2 x ... x Nk x 1). p quantile error to compute the loss. Returns ------- Tensor quantile loss, shape: (N1 x N2 x ... x Nk x 1)", "name": "compute_quantile_loss", "signature": "def compute_quantile_loss(F, y_true: Tensor, y_pred_p: Tensor, p: float) -> Tensor"}, {"docstring": "Compute the exact weights of the approximated integral CRPS = sum_{i=0}^{n-1} 0.5 * (q_{i+1} - q_{i}) * (z_{i+1} + z_{i}) under the assumption of linear interpolation or SQF, where z_i is the ith quantile prediction q_i. The inner terms cancel due to the telescoping sum property and we obtain CRPS = sum_{i=1}^n w_i z_i, with the weights w_i = (q_{i+1}-q_{i-1})/2 for i = 1, ..., n-1, w_0 = (q_1-q_0)/2 and w_n = (w_n - w_{n-1})/2. Returns ------- List weights of the quantiles.", "name": "compute_quantile_weights", "signature": "def compute_quantile_weights(self) -> List"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_030982", "prompt": "Implement the Python class `QuantileLoss` described below.\n\nClass description:\nImplement the QuantileLoss class.\n\nMethod signatures and docstrings:\n- def __init__(self, quantiles: List[float], quantile_weights: Optional[List[float]]=None, is_equal_weights: bool=True, weight: Optional[float]=None, batch_axis: int=0, **kwargs) -> None: Represents the quantile loss used to fit decoders that learn quantiles. Parameters ---------- quantiles list of quantiles to compute loss over. quantile_weights weights of the quantiles. is_equal_weights use equally quantiles weights or not weight weighting of the loss. batch_axis indicates axis that represents the batch.\n- def hybrid_forward(self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None): Compute the weighted sum of quantile losses. Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)) y_pred predicted target, shape (N1 x N2 x ... x Nk x num_quantiles) sample_weight sample weights Returns ------- Tensor weighted sum of the quantile losses, shape N1 x N1 x ... Nk\n- def compute_quantile_loss(F, y_true: Tensor, y_pred_p: Tensor, p: float) -> Tensor: Compute the quantile loss of the given quantile Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)). y_pred_p predicted target quantile, shape (N1 x N2 x ... x Nk x 1). p quantile error to compute the loss. Returns ------- Tensor quantile loss, shape: (N1 x N2 x ... x Nk x 1)\n- def compute_quantile_weights(self) -> List: Compute the exact weights of the approximated integral CRPS = sum_{i=0}^{n-1} 0.5 * (q_{i+1} - q_{i}) * (z_{i+1} + z_{i}) under the assumption of linear interpolation or SQF, where z_i is the ith quantile prediction q_i. The inner terms cancel due to the telescoping sum property and we obtain CRPS = sum_{i=1}^n w_i z_i, with the weights w_i = (q_{i+1}-q_{i-1})/2 for i = 1, ..., n-1, w_0 = (q_1-q_0)/2 and w_n = (w_n - w_{n-1})/2. Returns ------- List weights of the quantiles.", "prompted_full_text": "Implement the Python class `QuantileLoss` described below.\n\nClass description:\nImplement the QuantileLoss class.\n\nMethod signatures and docstrings:\n- def __init__(self, quantiles: List[float], quantile_weights: Optional[List[float]]=None, is_equal_weights: bool=True, weight: Optional[float]=None, batch_axis: int=0, **kwargs) -> None: Represents the quantile loss used to fit decoders that learn quantiles. Parameters ---------- quantiles list of quantiles to compute loss over. quantile_weights weights of the quantiles. is_equal_weights use equally quantiles weights or not weight weighting of the loss. batch_axis indicates axis that represents the batch.\n- def hybrid_forward(self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None): Compute the weighted sum of quantile losses. Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)) y_pred predicted target, shape (N1 x N2 x ... x Nk x num_quantiles) sample_weight sample weights Returns ------- Tensor weighted sum of the quantile losses, shape N1 x N1 x ... Nk\n- def compute_quantile_loss(F, y_true: Tensor, y_pred_p: Tensor, p: float) -> Tensor: Compute the quantile loss of the given quantile Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)). y_pred_p predicted target quantile, shape (N1 x N2 x ... x Nk x 1). p quantile error to compute the loss. Returns ------- Tensor quantile loss, shape: (N1 x N2 x ... x Nk x 1)\n- def compute_quantile_weights(self) -> List: Compute the exact weights of the approximated integral CRPS = sum_{i=0}^{n-1} 0.5 * (q_{i+1} - q_{i}) * (z_{i+1} + z_{i}) under the assumption of linear interpolation or SQF, where z_i is the ith quantile prediction q_i. The inner terms cancel due to the telescoping sum property and we obtain CRPS = sum_{i=1}^n w_i z_i, with the weights w_i = (q_{i+1}-q_{i-1})/2 for i = 1, ..., n-1, w_0 = (q_1-q_0)/2 and w_n = (w_n - w_{n-1})/2. Returns ------- List weights of the quantiles.\n\n<|skeleton|>\nclass QuantileLoss:\n\n def __init__(self, quantiles: List[float], quantile_weights: Optional[List[float]]=None, is_equal_weights: bool=True, weight: Optional[float]=None, batch_axis: int=0, **kwargs) -> None:\n \"\"\"Represents the quantile loss used to fit decoders that learn quantiles. Parameters ---------- quantiles list of quantiles to compute loss over. quantile_weights weights of the quantiles. is_equal_weights use equally quantiles weights or not weight weighting of the loss. batch_axis indicates axis that represents the batch.\"\"\"\n <|body_0|>\n\n def hybrid_forward(self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None):\n \"\"\"Compute the weighted sum of quantile losses. Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)) y_pred predicted target, shape (N1 x N2 x ... x Nk x num_quantiles) sample_weight sample weights Returns ------- Tensor weighted sum of the quantile losses, shape N1 x N1 x ... Nk\"\"\"\n <|body_1|>\n\n def compute_quantile_loss(F, y_true: Tensor, y_pred_p: Tensor, p: float) -> Tensor:\n \"\"\"Compute the quantile loss of the given quantile Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)). y_pred_p predicted target quantile, shape (N1 x N2 x ... x Nk x 1). p quantile error to compute the loss. Returns ------- Tensor quantile loss, shape: (N1 x N2 x ... x Nk x 1)\"\"\"\n <|body_2|>\n\n def compute_quantile_weights(self) -> List:\n \"\"\"Compute the exact weights of the approximated integral CRPS = sum_{i=0}^{n-1} 0.5 * (q_{i+1} - q_{i}) * (z_{i+1} + z_{i}) under the assumption of linear interpolation or SQF, where z_i is the ith quantile prediction q_i. The inner terms cancel due to the telescoping sum property and we obtain CRPS = sum_{i=1}^n w_i z_i, with the weights w_i = (q_{i+1}-q_{i-1})/2 for i = 1, ..., n-1, w_0 = (q_1-q_0)/2 and w_n = (w_n - w_{n-1})/2. Returns ------- List weights of the quantiles.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(weight, batch_axis, **kwargs)\n self.quantiles = quantiles\n self.num_quantiles = len(quantiles)\n self.is_equal_weights = is_equal_weights\n self.quantile_weights = quantile_weights if quantile_weights else self.compute_quantile_weights()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.num_quantiles > 1:\n y_pred_all = F.split(y_pred, axis=-1, num_outputs=self.num_quantiles, squeeze_axis=1)\n else:\n y_pred_all = [F.squeeze(y_pred, axis=-1)]\n qt_loss = []\n for i, y_pred_q in enumerate(y_pred_all):\n q = self.quantiles[i]\n weighted_qt = self.compute_quantile_loss(F, y_true, y_pred_q, q) * self.quantile_weights[i]\n qt_loss.append(weighted_qt)\n stacked_qt_losses = F.stack(*qt_loss, axis=-1)\n sum_qt_loss = F.mean(stacked_qt_losses, axis=-1)\n if sample_weight is not None:\n return sample_weight * sum_qt_loss\n else:\n return sum_qt_loss\n<|end_body_1|>\n\n<|body_start_2|>\n under_bias = p * F.maximum(y_true - y_pred_p, 0)\n over_bias = (1 - p) * F.maximum(y_pred_p - y_true, 0)\n qt_loss = 2 * (under_bias + over_bias)\n return qt_loss\n<|end_body_2|>\n\n<|body_start_3|>\n assert self.num_quantiles >= 0, f'invalid num_quantiles: {self.num_quantiles}'\n if self.num_quantiles == 0:\n quantile_weights = []\n elif self.is_equal_weights or self.num_quantiles == 1:\n quantile_weights = [1.0 / self.num_quantiles] * self.num_quantiles\n else:\n quantile_weights = [0.5 * (self.quantiles[1] - self.quantiles[0])] + [0.5 * (self.quantiles[i + 1] - self.quantiles[i - 1]) for i in range(1, self.num_quantiles - 1)] + [0.5 * (self.quantiles[-1] - self.quantiles[-2])]\n return quantile_weights\n<|end_body_3|>\n", "revision_id": "df4256b0e67120db555c109a1bf6cfa2b3bd3cd8", "skeleton": "<|skeleton|>\nclass QuantileLoss:\n\n def __init__(self, quantiles: List[float], quantile_weights: Optional[List[float]]=None, is_equal_weights: bool=True, weight: Optional[float]=None, batch_axis: int=0, **kwargs) -> None:\n \"\"\"Represents the quantile loss used to fit decoders that learn quantiles. Parameters ---------- quantiles list of quantiles to compute loss over. quantile_weights weights of the quantiles. is_equal_weights use equally quantiles weights or not weight weighting of the loss. batch_axis indicates axis that represents the batch.\"\"\"\n <|body_0|>\n\n def hybrid_forward(self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None):\n \"\"\"Compute the weighted sum of quantile losses. Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)) y_pred predicted target, shape (N1 x N2 x ... x Nk x num_quantiles) sample_weight sample weights Returns ------- Tensor weighted sum of the quantile losses, shape N1 x N1 x ... Nk\"\"\"\n <|body_1|>\n\n def compute_quantile_loss(F, y_true: Tensor, y_pred_p: Tensor, p: float) -> Tensor:\n \"\"\"Compute the quantile loss of the given quantile Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)). y_pred_p predicted target quantile, shape (N1 x N2 x ... x Nk x 1). p quantile error to compute the loss. Returns ------- Tensor quantile loss, shape: (N1 x N2 x ... x Nk x 1)\"\"\"\n <|body_2|>\n\n def compute_quantile_weights(self) -> List:\n \"\"\"Compute the exact weights of the approximated integral CRPS = sum_{i=0}^{n-1} 0.5 * (q_{i+1} - q_{i}) * (z_{i+1} + z_{i}) under the assumption of linear interpolation or SQF, where z_i is the ith quantile prediction q_i. The inner terms cancel due to the telescoping sum property and we obtain CRPS = sum_{i=1}^n w_i z_i, with the weights w_i = (q_{i+1}-q_{i-1})/2 for i = 1, ..., n-1, w_0 = (q_1-q_0)/2 and w_n = (w_n - w_{n-1})/2. Returns ------- List weights of the quantiles.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QuantileLoss:\n def __init__(self, quantiles: List[float], quantile_weights: Optional[List[float]]=None, is_equal_weights: bool=True, weight: Optional[float]=None, batch_axis: int=0, **kwargs) -> None:\n \"\"\"Represents the quantile loss used to fit decoders that learn quantiles. Parameters ---------- quantiles list of quantiles to compute loss over. quantile_weights weights of the quantiles. is_equal_weights use equally quantiles weights or not weight weighting of the loss. batch_axis indicates axis that represents the batch.\"\"\"\n super().__init__(weight, batch_axis, **kwargs)\n self.quantiles = quantiles\n self.num_quantiles = len(quantiles)\n self.is_equal_weights = is_equal_weights\n self.quantile_weights = quantile_weights if quantile_weights else self.compute_quantile_weights()\n\n def hybrid_forward(self, F, y_true: Tensor, y_pred: Tensor, sample_weight=None):\n \"\"\"Compute the weighted sum of quantile losses. Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)) y_pred predicted target, shape (N1 x N2 x ... x Nk x num_quantiles) sample_weight sample weights Returns ------- Tensor weighted sum of the quantile losses, shape N1 x N1 x ... Nk\"\"\"\n if self.num_quantiles > 1:\n y_pred_all = F.split(y_pred, axis=-1, num_outputs=self.num_quantiles, squeeze_axis=1)\n else:\n y_pred_all = [F.squeeze(y_pred, axis=-1)]\n qt_loss = []\n for i, y_pred_q in enumerate(y_pred_all):\n q = self.quantiles[i]\n weighted_qt = self.compute_quantile_loss(F, y_true, y_pred_q, q) * self.quantile_weights[i]\n qt_loss.append(weighted_qt)\n stacked_qt_losses = F.stack(*qt_loss, axis=-1)\n sum_qt_loss = F.mean(stacked_qt_losses, axis=-1)\n if sample_weight is not None:\n return sample_weight * sum_qt_loss\n else:\n return sum_qt_loss\n\n def compute_quantile_loss(F, y_true: Tensor, y_pred_p: Tensor, p: float) -> Tensor:\n \"\"\"Compute the quantile loss of the given quantile Parameters ---------- F A module that can either refer to the Symbol API or the NDArray API in MXNet. y_true true target, shape (N1 x N2 x ... x Nk x dimension of time series (normally 1)). y_pred_p predicted target quantile, shape (N1 x N2 x ... x Nk x 1). p quantile error to compute the loss. Returns ------- Tensor quantile loss, shape: (N1 x N2 x ... x Nk x 1)\"\"\"\n under_bias = p * F.maximum(y_true - y_pred_p, 0)\n over_bias = (1 - p) * F.maximum(y_pred_p - y_true, 0)\n qt_loss = 2 * (under_bias + over_bias)\n return qt_loss\n\n def compute_quantile_weights(self) -> List:\n \"\"\"Compute the exact weights of the approximated integral CRPS = sum_{i=0}^{n-1} 0.5 * (q_{i+1} - q_{i}) * (z_{i+1} + z_{i}) under the assumption of linear interpolation or SQF, where z_i is the ith quantile prediction q_i. The inner terms cancel due to the telescoping sum property and we obtain CRPS = sum_{i=1}^n w_i z_i, with the weights w_i = (q_{i+1}-q_{i-1})/2 for i = 1, ..., n-1, w_0 = (q_1-q_0)/2 and w_n = (w_n - w_{n-1})/2. Returns ------- List weights of the quantiles.\"\"\"\n assert self.num_quantiles >= 0, f'invalid num_quantiles: {self.num_quantiles}'\n if self.num_quantiles == 0:\n quantile_weights = []\n elif self.is_equal_weights or self.num_quantiles == 1:\n quantile_weights = [1.0 / self.num_quantiles] * self.num_quantiles\n else:\n quantile_weights = [0.5 * (self.quantiles[1] - self.quantiles[0])] + [0.5 * (self.quantiles[i + 1] - self.quantiles[i - 1]) for i in range(1, self.num_quantiles - 1)] + [0.5 * (self.quantiles[-1] - self.quantiles[-2])]\n return quantile_weights\n", "source": "the_stack_v2_python_sparse", "source_path": "src/gluonts/mx/block/quantile_output.py", "source_repo": "mbohlkeschneider/gluon-ts", "split": "test", "star_events_count": 54} {"blob_id": "ef2ea55617d96aa3deb6a8661432e3249a20aeda", "bodies": ["super(EncoderMix, self).__init__(idim=idim, selfattention_layer_type='selfattn', attention_dim=attention_dim, attention_heads=attention_heads, linear_units=linear_units, num_blocks=num_blocks_rec, dropout_rate=dropout_rate, positional_dropout_rate=positional_dropout_rate, attention_dropout_rate=attention_dropout_rate, input_layer=input_layer, pos_enc_class=pos_enc_class, normalize_before=normalize_before, concat_after=concat_after, positionwise_layer_type=positionwise_layer_type, positionwise_conv_kernel_size=positionwise_conv_kernel_size, padding_idx=padding_idx)\npositionwise_layer, positionwise_layer_args = self.get_positionwise_layer(positionwise_layer_type, attention_dim, linear_units, dropout_rate, positionwise_conv_kernel_size)\nself.num_spkrs = num_spkrs\nself.encoders_sd = torch.nn.ModuleList([repeat(num_blocks_sd, lambda lnum: EncoderLayer(attention_dim, MultiHeadedAttention(attention_heads, attention_dim, attention_dropout_rate), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after)) for i in range(num_spkrs)])", "if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):\n xs, masks = self.embed(xs, masks)\nelse:\n xs = self.embed(xs)\nxs_sd, masks_sd = ([None] * self.num_spkrs, [None] * self.num_spkrs)\nfor ns in range(self.num_spkrs):\n xs_sd[ns], masks_sd[ns] = self.encoders_sd[ns](xs, masks)\n xs_sd[ns], masks_sd[ns] = self.encoders(xs_sd[ns], masks_sd[ns])\n if self.normalize_before:\n xs_sd[ns] = self.after_norm(xs_sd[ns])\nreturn (xs_sd, masks_sd)", "if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\nelse:\n xs = self.embed(xs)\nnew_cache_sd = []\nfor ns in range(self.num_spkrs):\n if cache is None:\n cache = [None for _ in range(len(self.encoders_sd) + len(self.encoders_rec))]\n new_cache = []\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_sd[ns]):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_rec):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n new_cache_sd.append(new_cache)\n if self.normalize_before:\n xs = self.after_norm(xs)\nreturn (xs, masks, new_cache_sd)"], "bodies_text": "<|body_start_0|>\n super(EncoderMix, self).__init__(idim=idim, selfattention_layer_type='selfattn', attention_dim=attention_dim, attention_heads=attention_heads, linear_units=linear_units, num_blocks=num_blocks_rec, dropout_rate=dropout_rate, positional_dropout_rate=positional_dropout_rate, attention_dropout_rate=attention_dropout_rate, input_layer=input_layer, pos_enc_class=pos_enc_class, normalize_before=normalize_before, concat_after=concat_after, positionwise_layer_type=positionwise_layer_type, positionwise_conv_kernel_size=positionwise_conv_kernel_size, padding_idx=padding_idx)\n positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(positionwise_layer_type, attention_dim, linear_units, dropout_rate, positionwise_conv_kernel_size)\n self.num_spkrs = num_spkrs\n self.encoders_sd = torch.nn.ModuleList([repeat(num_blocks_sd, lambda lnum: EncoderLayer(attention_dim, MultiHeadedAttention(attention_heads, attention_dim, attention_dropout_rate), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after)) for i in range(num_spkrs)])\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs_sd, masks_sd = ([None] * self.num_spkrs, [None] * self.num_spkrs)\n for ns in range(self.num_spkrs):\n xs_sd[ns], masks_sd[ns] = self.encoders_sd[ns](xs, masks)\n xs_sd[ns], masks_sd[ns] = self.encoders(xs_sd[ns], masks_sd[ns])\n if self.normalize_before:\n xs_sd[ns] = self.after_norm(xs_sd[ns])\n return (xs_sd, masks_sd)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n new_cache_sd = []\n for ns in range(self.num_spkrs):\n if cache is None:\n cache = [None for _ in range(len(self.encoders_sd) + len(self.encoders_rec))]\n new_cache = []\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_sd[ns]):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_rec):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n new_cache_sd.append(new_cache)\n if self.normalize_before:\n xs = self.after_norm(xs)\n return (xs, masks, new_cache_sd)\n<|end_body_2|>\n", "class_docstring": "Transformer encoder module. :param int idim: input dim :param int attention_dim: dimension of attention :param int attention_heads: the number of heads of multi head attention :param int linear_units: the number of units of position-wise feed forward :param int num_blocks: the number of decoder blocks :param float dropout_rate: dropout rate :param float attention_dropout_rate: dropout rate in attention :param float positional_dropout_rate: dropout rate after adding positional encoding :param str or torch.nn.Module input_layer: input layer type :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding :param bool normalize_before: whether to use layer_norm before the first bl", "class_name": "EncoderMix", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EncoderMix:\n \"\"\"Transformer encoder module. :param int idim: input dim :param int attention_dim: dimension of attention :param int attention_heads: the number of heads of multi head attention :param int linear_units: the number of units of position-wise feed forward :param int num_blocks: the number of decoder blocks :param float dropout_rate: dropout rate :param float attention_dropout_rate: dropout rate in attention :param float positional_dropout_rate: dropout rate after adding positional encoding :param str or torch.nn.Module input_layer: input layer type :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding :param bool normalize_before: whether to use layer_norm before the first bl\"\"\"\n\n def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks_sd=4, num_blocks_rec=8, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer='conv2d', pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type='linear', positionwise_conv_kernel_size=1, padding_idx=-1, num_spkrs=2):\n \"\"\"Construct an Encoder object.\"\"\"\n <|body_0|>\n\n def forward(self, xs, masks):\n \"\"\"Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]:\"\"\"\n <|body_1|>\n\n def forward_one_step(self, xs, masks, cache=None):\n \"\"\"Encode input frame. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :param List[torch.Tensor] cache: cache tensors :return: position embedded tensor, mask and new cache :rtype Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncoderMix, self).__init__(idim=idim, selfattention_layer_type='selfattn', attention_dim=attention_dim, attention_heads=attention_heads, linear_units=linear_units, num_blocks=num_blocks_rec, dropout_rate=dropout_rate, positional_dropout_rate=positional_dropout_rate, attention_dropout_rate=attention_dropout_rate, input_layer=input_layer, pos_enc_class=pos_enc_class, normalize_before=normalize_before, concat_after=concat_after, positionwise_layer_type=positionwise_layer_type, positionwise_conv_kernel_size=positionwise_conv_kernel_size, padding_idx=padding_idx)\n positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(positionwise_layer_type, attention_dim, linear_units, dropout_rate, positionwise_conv_kernel_size)\n self.num_spkrs = num_spkrs\n self.encoders_sd = torch.nn.ModuleList([repeat(num_blocks_sd, lambda lnum: EncoderLayer(attention_dim, MultiHeadedAttention(attention_heads, attention_dim, attention_dropout_rate), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after)) for i in range(num_spkrs)])\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs_sd, masks_sd = ([None] * self.num_spkrs, [None] * self.num_spkrs)\n for ns in range(self.num_spkrs):\n xs_sd[ns], masks_sd[ns] = self.encoders_sd[ns](xs, masks)\n xs_sd[ns], masks_sd[ns] = self.encoders(xs_sd[ns], masks_sd[ns])\n if self.normalize_before:\n xs_sd[ns] = self.after_norm(xs_sd[ns])\n return (xs_sd, masks_sd)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n new_cache_sd = []\n for ns in range(self.num_spkrs):\n if cache is None:\n cache = [None for _ in range(len(self.encoders_sd) + len(self.encoders_rec))]\n new_cache = []\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_sd[ns]):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_rec):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n new_cache_sd.append(new_cache)\n if self.normalize_before:\n xs = self.after_norm(xs)\n return (xs, masks, new_cache_sd)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000262", "length_bytes": 6407, "license_type": "permissive", "methods": [{"docstring": "Construct an Encoder object.", "name": "__init__", "signature": "def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks_sd=4, num_blocks_rec=8, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer='conv2d', pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type='linear', positionwise_conv_kernel_size=1, padding_idx=-1, num_spkrs=2)"}, {"docstring": "Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]:", "name": "forward", "signature": "def forward(self, xs, masks)"}, {"docstring": "Encode input frame. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :param List[torch.Tensor] cache: cache tensors :return: position embedded tensor, mask and new cache :rtype Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:", "name": "forward_one_step", "signature": "def forward_one_step(self, xs, masks, cache=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_012224", "prompt": "Implement the Python class `EncoderMix` described below.\n\nClass description:\nTransformer encoder module. :param int idim: input dim :param int attention_dim: dimension of attention :param int attention_heads: the number of heads of multi head attention :param int linear_units: the number of units of position-wise feed forward :param int num_blocks: the number of decoder blocks :param float dropout_rate: dropout rate :param float attention_dropout_rate: dropout rate in attention :param float positional_dropout_rate: dropout rate after adding positional encoding :param str or torch.nn.Module input_layer: input layer type :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding :param bool normalize_before: whether to use layer_norm before the first bl\n\nMethod signatures and docstrings:\n- def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks_sd=4, num_blocks_rec=8, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer='conv2d', pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type='linear', positionwise_conv_kernel_size=1, padding_idx=-1, num_spkrs=2): Construct an Encoder object.\n- def forward(self, xs, masks): Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]:\n- def forward_one_step(self, xs, masks, cache=None): Encode input frame. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :param List[torch.Tensor] cache: cache tensors :return: position embedded tensor, mask and new cache :rtype Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:", "prompted_full_text": "Implement the Python class `EncoderMix` described below.\n\nClass description:\nTransformer encoder module. :param int idim: input dim :param int attention_dim: dimension of attention :param int attention_heads: the number of heads of multi head attention :param int linear_units: the number of units of position-wise feed forward :param int num_blocks: the number of decoder blocks :param float dropout_rate: dropout rate :param float attention_dropout_rate: dropout rate in attention :param float positional_dropout_rate: dropout rate after adding positional encoding :param str or torch.nn.Module input_layer: input layer type :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding :param bool normalize_before: whether to use layer_norm before the first bl\n\nMethod signatures and docstrings:\n- def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks_sd=4, num_blocks_rec=8, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer='conv2d', pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type='linear', positionwise_conv_kernel_size=1, padding_idx=-1, num_spkrs=2): Construct an Encoder object.\n- def forward(self, xs, masks): Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]:\n- def forward_one_step(self, xs, masks, cache=None): Encode input frame. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :param List[torch.Tensor] cache: cache tensors :return: position embedded tensor, mask and new cache :rtype Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:\n\n<|skeleton|>\nclass EncoderMix:\n \"\"\"Transformer encoder module. :param int idim: input dim :param int attention_dim: dimension of attention :param int attention_heads: the number of heads of multi head attention :param int linear_units: the number of units of position-wise feed forward :param int num_blocks: the number of decoder blocks :param float dropout_rate: dropout rate :param float attention_dropout_rate: dropout rate in attention :param float positional_dropout_rate: dropout rate after adding positional encoding :param str or torch.nn.Module input_layer: input layer type :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding :param bool normalize_before: whether to use layer_norm before the first bl\"\"\"\n\n def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks_sd=4, num_blocks_rec=8, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer='conv2d', pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type='linear', positionwise_conv_kernel_size=1, padding_idx=-1, num_spkrs=2):\n \"\"\"Construct an Encoder object.\"\"\"\n <|body_0|>\n\n def forward(self, xs, masks):\n \"\"\"Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]:\"\"\"\n <|body_1|>\n\n def forward_one_step(self, xs, masks, cache=None):\n \"\"\"Encode input frame. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :param List[torch.Tensor] cache: cache tensors :return: position embedded tensor, mask and new cache :rtype Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(EncoderMix, self).__init__(idim=idim, selfattention_layer_type='selfattn', attention_dim=attention_dim, attention_heads=attention_heads, linear_units=linear_units, num_blocks=num_blocks_rec, dropout_rate=dropout_rate, positional_dropout_rate=positional_dropout_rate, attention_dropout_rate=attention_dropout_rate, input_layer=input_layer, pos_enc_class=pos_enc_class, normalize_before=normalize_before, concat_after=concat_after, positionwise_layer_type=positionwise_layer_type, positionwise_conv_kernel_size=positionwise_conv_kernel_size, padding_idx=padding_idx)\n positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(positionwise_layer_type, attention_dim, linear_units, dropout_rate, positionwise_conv_kernel_size)\n self.num_spkrs = num_spkrs\n self.encoders_sd = torch.nn.ModuleList([repeat(num_blocks_sd, lambda lnum: EncoderLayer(attention_dim, MultiHeadedAttention(attention_heads, attention_dim, attention_dropout_rate), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after)) for i in range(num_spkrs)])\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs_sd, masks_sd = ([None] * self.num_spkrs, [None] * self.num_spkrs)\n for ns in range(self.num_spkrs):\n xs_sd[ns], masks_sd[ns] = self.encoders_sd[ns](xs, masks)\n xs_sd[ns], masks_sd[ns] = self.encoders(xs_sd[ns], masks_sd[ns])\n if self.normalize_before:\n xs_sd[ns] = self.after_norm(xs_sd[ns])\n return (xs_sd, masks_sd)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n new_cache_sd = []\n for ns in range(self.num_spkrs):\n if cache is None:\n cache = [None for _ in range(len(self.encoders_sd) + len(self.encoders_rec))]\n new_cache = []\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_sd[ns]):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_rec):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n new_cache_sd.append(new_cache)\n if self.normalize_before:\n xs = self.after_norm(xs)\n return (xs, masks, new_cache_sd)\n<|end_body_2|>\n", "revision_id": "bcd20948db7846ee523443ef9fd78c7a1248c95e", "skeleton": "<|skeleton|>\nclass EncoderMix:\n \"\"\"Transformer encoder module. :param int idim: input dim :param int attention_dim: dimension of attention :param int attention_heads: the number of heads of multi head attention :param int linear_units: the number of units of position-wise feed forward :param int num_blocks: the number of decoder blocks :param float dropout_rate: dropout rate :param float attention_dropout_rate: dropout rate in attention :param float positional_dropout_rate: dropout rate after adding positional encoding :param str or torch.nn.Module input_layer: input layer type :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding :param bool normalize_before: whether to use layer_norm before the first bl\"\"\"\n\n def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks_sd=4, num_blocks_rec=8, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer='conv2d', pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type='linear', positionwise_conv_kernel_size=1, padding_idx=-1, num_spkrs=2):\n \"\"\"Construct an Encoder object.\"\"\"\n <|body_0|>\n\n def forward(self, xs, masks):\n \"\"\"Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]:\"\"\"\n <|body_1|>\n\n def forward_one_step(self, xs, masks, cache=None):\n \"\"\"Encode input frame. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :param List[torch.Tensor] cache: cache tensors :return: position embedded tensor, mask and new cache :rtype Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EncoderMix:\n \"\"\"Transformer encoder module. :param int idim: input dim :param int attention_dim: dimension of attention :param int attention_heads: the number of heads of multi head attention :param int linear_units: the number of units of position-wise feed forward :param int num_blocks: the number of decoder blocks :param float dropout_rate: dropout rate :param float attention_dropout_rate: dropout rate in attention :param float positional_dropout_rate: dropout rate after adding positional encoding :param str or torch.nn.Module input_layer: input layer type :param class pos_enc_class: PositionalEncoding or ScaledPositionalEncoding :param bool normalize_before: whether to use layer_norm before the first bl\"\"\"\n\n def __init__(self, idim, attention_dim=256, attention_heads=4, linear_units=2048, num_blocks_sd=4, num_blocks_rec=8, dropout_rate=0.1, positional_dropout_rate=0.1, attention_dropout_rate=0.0, input_layer='conv2d', pos_enc_class=PositionalEncoding, normalize_before=True, concat_after=False, positionwise_layer_type='linear', positionwise_conv_kernel_size=1, padding_idx=-1, num_spkrs=2):\n \"\"\"Construct an Encoder object.\"\"\"\n super(EncoderMix, self).__init__(idim=idim, selfattention_layer_type='selfattn', attention_dim=attention_dim, attention_heads=attention_heads, linear_units=linear_units, num_blocks=num_blocks_rec, dropout_rate=dropout_rate, positional_dropout_rate=positional_dropout_rate, attention_dropout_rate=attention_dropout_rate, input_layer=input_layer, pos_enc_class=pos_enc_class, normalize_before=normalize_before, concat_after=concat_after, positionwise_layer_type=positionwise_layer_type, positionwise_conv_kernel_size=positionwise_conv_kernel_size, padding_idx=padding_idx)\n positionwise_layer, positionwise_layer_args = self.get_positionwise_layer(positionwise_layer_type, attention_dim, linear_units, dropout_rate, positionwise_conv_kernel_size)\n self.num_spkrs = num_spkrs\n self.encoders_sd = torch.nn.ModuleList([repeat(num_blocks_sd, lambda lnum: EncoderLayer(attention_dim, MultiHeadedAttention(attention_heads, attention_dim, attention_dropout_rate), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after)) for i in range(num_spkrs)])\n\n def forward(self, xs, masks):\n \"\"\"Encode input sequence. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :return: position embedded tensor and mask :rtype Tuple[torch.Tensor, torch.Tensor]:\"\"\"\n if isinstance(self.embed, (Conv2dSubsampling, VGG2L)):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n xs_sd, masks_sd = ([None] * self.num_spkrs, [None] * self.num_spkrs)\n for ns in range(self.num_spkrs):\n xs_sd[ns], masks_sd[ns] = self.encoders_sd[ns](xs, masks)\n xs_sd[ns], masks_sd[ns] = self.encoders(xs_sd[ns], masks_sd[ns])\n if self.normalize_before:\n xs_sd[ns] = self.after_norm(xs_sd[ns])\n return (xs_sd, masks_sd)\n\n def forward_one_step(self, xs, masks, cache=None):\n \"\"\"Encode input frame. :param torch.Tensor xs: input tensor :param torch.Tensor masks: input mask :param List[torch.Tensor] cache: cache tensors :return: position embedded tensor, mask and new cache :rtype Tuple[torch.Tensor, torch.Tensor, List[torch.Tensor]]:\"\"\"\n if isinstance(self.embed, Conv2dSubsampling):\n xs, masks = self.embed(xs, masks)\n else:\n xs = self.embed(xs)\n new_cache_sd = []\n for ns in range(self.num_spkrs):\n if cache is None:\n cache = [None for _ in range(len(self.encoders_sd) + len(self.encoders_rec))]\n new_cache = []\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_sd[ns]):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n for c, e in zip(cache[:len(self.encoders_sd)], self.encoders_rec):\n xs, masks = e(xs, masks, cache=c)\n new_cache.append(xs)\n new_cache_sd.append(new_cache)\n if self.normalize_before:\n xs = self.after_norm(xs)\n return (xs, masks, new_cache_sd)\n", "source": "the_stack_v2_python_sparse", "source_path": "espnet/nets/pytorch_backend/transformer/encoder_mix.py", "source_repo": "espnet/espnet", "split": "test", "star_events_count": 7242} {"blob_id": "e83474f30a5e4ded4c5f0ae91bc63f76cf65e815", "bodies": ["cnt = 0\nl = set()\nfor i in range(len(nums) - 1):\n for n in range(i + 1, len(nums)):\n if abs(nums[i] - nums[n]) == k:\n if nums[i] not in l or nums[n] not in l:\n cnt += 1\n l.add(nums[i])\n l.add(nums[n])\nreturn cnt", "nums.sort()\ncomp = {}\nlast_pair, pairs = (None, 0)\nfor num in nums:\n if num in comp:\n if (comp[num], num) != last_pair:\n pairs += 1\n last_pair = (comp[num], num)\n comp[num + k] = num\nreturn pairs", "import collections\nct = collections.Counter(nums)\nif k < 0:\n return 0\nif k == 0:\n return sum((ct[i] > 1 for i in ct))\nif k > 0:\n return sum((i + k in ct for i in ct))"], "bodies_text": "<|body_start_0|>\n cnt = 0\n l = set()\n for i in range(len(nums) - 1):\n for n in range(i + 1, len(nums)):\n if abs(nums[i] - nums[n]) == k:\n if nums[i] not in l or nums[n] not in l:\n cnt += 1\n l.add(nums[i])\n l.add(nums[n])\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n nums.sort()\n comp = {}\n last_pair, pairs = (None, 0)\n for num in nums:\n if num in comp:\n if (comp[num], num) != last_pair:\n pairs += 1\n last_pair = (comp[num], num)\n comp[num + k] = num\n return pairs\n<|end_body_1|>\n\n<|body_start_2|>\n import collections\n ct = collections.Counter(nums)\n if k < 0:\n return 0\n if k == 0:\n return sum((ct[i] > 1 for i in ct))\n if k > 0:\n return sum((i + k in ct for i in ct))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findPairs(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def findPairs2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n def findPairs3(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnt = 0\n l = set()\n for i in range(len(nums) - 1):\n for n in range(i + 1, len(nums)):\n if abs(nums[i] - nums[n]) == k:\n if nums[i] not in l or nums[n] not in l:\n cnt += 1\n l.add(nums[i])\n l.add(nums[n])\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n nums.sort()\n comp = {}\n last_pair, pairs = (None, 0)\n for num in nums:\n if num in comp:\n if (comp[num], num) != last_pair:\n pairs += 1\n last_pair = (comp[num], num)\n comp[num + k] = num\n return pairs\n<|end_body_1|>\n\n<|body_start_2|>\n import collections\n ct = collections.Counter(nums)\n if k < 0:\n return 0\n if k == 0:\n return sum((ct[i] > 1 for i in ct))\n if k > 0:\n return sum((i + k in ct for i in ct))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000263", "length_bytes": 1441, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type k: int :rtype: int", "name": "findPairs", "signature": "def findPairs(self, nums, k)"}, {"docstring": ":type nums: List[int] :type k: int :rtype: int", "name": "findPairs2", "signature": "def findPairs2(self, nums, k)"}, {"docstring": ":type nums: List[int] :type k: int :rtype: int", "name": "findPairs3", "signature": "def findPairs3(self, nums, k)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_001934", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findPairs(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n- def findPairs2(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n- def findPairs3(self, nums, k): :type nums: List[int] :type k: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findPairs(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n- def findPairs2(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n- def findPairs3(self, nums, k): :type nums: List[int] :type k: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def findPairs(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def findPairs2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n def findPairs3(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cnt = 0\n l = set()\n for i in range(len(nums) - 1):\n for n in range(i + 1, len(nums)):\n if abs(nums[i] - nums[n]) == k:\n if nums[i] not in l or nums[n] not in l:\n cnt += 1\n l.add(nums[i])\n l.add(nums[n])\n return cnt\n<|end_body_0|>\n\n<|body_start_1|>\n nums.sort()\n comp = {}\n last_pair, pairs = (None, 0)\n for num in nums:\n if num in comp:\n if (comp[num], num) != last_pair:\n pairs += 1\n last_pair = (comp[num], num)\n comp[num + k] = num\n return pairs\n<|end_body_1|>\n\n<|body_start_2|>\n import collections\n ct = collections.Counter(nums)\n if k < 0:\n return 0\n if k == 0:\n return sum((ct[i] > 1 for i in ct))\n if k > 0:\n return sum((i + k in ct for i in ct))\n<|end_body_2|>\n", "revision_id": "b925bb22d1daa4a56c5a238a5758a926905559b4", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findPairs(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def findPairs2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n def findPairs3(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def findPairs(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n cnt = 0\n l = set()\n for i in range(len(nums) - 1):\n for n in range(i + 1, len(nums)):\n if abs(nums[i] - nums[n]) == k:\n if nums[i] not in l or nums[n] not in l:\n cnt += 1\n l.add(nums[i])\n l.add(nums[n])\n return cnt\n\n def findPairs2(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n nums.sort()\n comp = {}\n last_pair, pairs = (None, 0)\n for num in nums:\n if num in comp:\n if (comp[num], num) != last_pair:\n pairs += 1\n last_pair = (comp[num], num)\n comp[num + k] = num\n return pairs\n\n def findPairs3(self, nums, k):\n \"\"\":type nums: List[int] :type k: int :rtype: int\"\"\"\n import collections\n ct = collections.Counter(nums)\n if k < 0:\n return 0\n if k == 0:\n return sum((ct[i] > 1 for i in ct))\n if k > 0:\n return sum((i + k in ct for i in ct))\n", "source": "the_stack_v2_python_sparse", "source_path": "532. K-diff Pairs in an Array.py", "source_repo": "beninghton/notGivenUpToG", "split": "test", "star_events_count": 0} {"blob_id": "ea451e97fcd1141dfbd051ce96711d3e5c90523e", "bodies": ["ht = {}\nfor i, item in enumerate(words):\n if item in ht:\n ht[item].append(i)\n else:\n ht[item] = [i]\nself.n = len(words)\nself.ht = ht", "l1, l2 = (self.ht[word1], self.ht[word2])\ni1, i2 = (0, 0)\nres = abs(l1[0] - l2[0])\nwhile i1 != len(l1) and i2 != len(l2):\n res = min(res, abs(l1[i1] - l2[i2]))\n if l1[i1] < l2[i2]:\n i1 += 1\n else:\n i2 += 1\nreturn res"], "bodies_text": "<|body_start_0|>\n ht = {}\n for i, item in enumerate(words):\n if item in ht:\n ht[item].append(i)\n else:\n ht[item] = [i]\n self.n = len(words)\n self.ht = ht\n<|end_body_0|>\n\n<|body_start_1|>\n l1, l2 = (self.ht[word1], self.ht[word2])\n i1, i2 = (0, 0)\n res = abs(l1[0] - l2[0])\n while i1 != len(l1) and i2 != len(l2):\n res = min(res, abs(l1[i1] - l2[i2]))\n if l1[i1] < l2[i2]:\n i1 += 1\n else:\n i2 += 1\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "WordDistance", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\"Adds a word into the data structure. :type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ht = {}\n for i, item in enumerate(words):\n if item in ht:\n ht[item].append(i)\n else:\n ht[item] = [i]\n self.n = len(words)\n self.ht = ht\n<|end_body_0|>\n\n<|body_start_1|>\n l1, l2 = (self.ht[word1], self.ht[word2])\n i1, i2 = (0, 0)\n res = abs(l1[0] - l2[0])\n while i1 != len(l1) and i2 != len(l2):\n res = min(res, abs(l1[i1] - l2[i2]))\n if l1[i1] < l2[i2]:\n i1 += 1\n else:\n i2 += 1\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000264", "length_bytes": 812, "license_type": "no_license", "methods": [{"docstring": "initialize your data structure here. :type words: List[str]", "name": "__init__", "signature": "def __init__(self, words)"}, {"docstring": "Adds a word into the data structure. :type word1: str :type word2: str :rtype: int", "name": "shortest", "signature": "def shortest(self, word1, word2)"}], "n_methods": 2, "prompt": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, words): initialize your data structure here. :type words: List[str]\n- def shortest(self, word1, word2): Adds a word into the data structure. :type word1: str :type word2: str :rtype: int", "prompted_full_text": "Implement the Python class `WordDistance` described below.\n\nClass description:\nImplement the WordDistance class.\n\nMethod signatures and docstrings:\n- def __init__(self, words): initialize your data structure here. :type words: List[str]\n- def shortest(self, word1, word2): Adds a word into the data structure. :type word1: str :type word2: str :rtype: int\n\n<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\"Adds a word into the data structure. :type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ht = {}\n for i, item in enumerate(words):\n if item in ht:\n ht[item].append(i)\n else:\n ht[item] = [i]\n self.n = len(words)\n self.ht = ht\n<|end_body_0|>\n\n<|body_start_1|>\n l1, l2 = (self.ht[word1], self.ht[word2])\n i1, i2 = (0, 0)\n res = abs(l1[0] - l2[0])\n while i1 != len(l1) and i2 != len(l2):\n res = min(res, abs(l1[i1] - l2[i2]))\n if l1[i1] < l2[i2]:\n i1 += 1\n else:\n i2 += 1\n return res\n<|end_body_1|>\n", "revision_id": "bc08a59d97384eca821801992cb4fa3b0c1d9eb5", "skeleton": "<|skeleton|>\nclass WordDistance:\n\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n <|body_0|>\n\n def shortest(self, word1, word2):\n \"\"\"Adds a word into the data structure. :type word1: str :type word2: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WordDistance:\n def __init__(self, words):\n \"\"\"initialize your data structure here. :type words: List[str]\"\"\"\n ht = {}\n for i, item in enumerate(words):\n if item in ht:\n ht[item].append(i)\n else:\n ht[item] = [i]\n self.n = len(words)\n self.ht = ht\n\n def shortest(self, word1, word2):\n \"\"\"Adds a word into the data structure. :type word1: str :type word2: str :rtype: int\"\"\"\n l1, l2 = (self.ht[word1], self.ht[word2])\n i1, i2 = (0, 0)\n res = abs(l1[0] - l2[0])\n while i1 != len(l1) and i2 != len(l2):\n res = min(res, abs(l1[i1] - l2[i2]))\n if l1[i1] < l2[i2]:\n i1 += 1\n else:\n i2 += 1\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "python/P244.py", "source_repo": "marshalma/Leetcode-Solutions", "split": "test", "star_events_count": 0} {"blob_id": "d7bd582f19fca523ea8fb6c125588eea41f29403", "bodies": ["responder = HttpResponder(handler)\nsock.settimeout(None)\nsock.setblocking(0)\nrequest_msg_reader = None\nforce_close = True\nlogging.debug('Connection from %s:%s', client_addr[0], client_addr[1])\ntry:\n try:\n if server.using_ssl:\n sock.set_accept_state()\n try:\n http.Handshake(sock, self.WRITE_TIMEOUT)\n except http.HttpSessionHandshakeUnexpectedEOF:\n logging.debug('Unexpected EOF from %s:%s', client_addr[0], client_addr[1])\n return\n request_msg, request_msg_reader, force_close, response_msg = responder(compat.partial(self._ReadRequest, sock, self.READ_TIMEOUT))\n if response_msg:\n logging.info('%s:%s %s %s', client_addr[0], client_addr[1], request_msg.start_line, response_msg.start_line.code)\n self._SendResponse(sock, request_msg, response_msg, self.WRITE_TIMEOUT)\n finally:\n http.ShutdownConnection(sock, self.CLOSE_TIMEOUT, self.WRITE_TIMEOUT, request_msg_reader, force_close)\n sock.close()\nfinally:\n logging.debug('Disconnected %s:%s', client_addr[0], client_addr[1])", "msg = http.HttpMessage()\ntry:\n reader = _HttpClientToServerMessageReader(sock, msg, timeout)\nexcept http.HttpSocketTimeout:\n raise http.HttpError('Timeout while reading request')\nexcept socket.error as err:\n raise http.HttpError('Error reading request: %s' % err)\nreturn (msg, reader)", "try:\n _HttpServerToClientMessageWriter(sock, req_msg, msg, timeout)\nexcept http.HttpSocketTimeout:\n raise http.HttpError('Timeout while sending response')\nexcept socket.error as err:\n raise http.HttpError('Error sending response: %s' % err)"], "bodies_text": "<|body_start_0|>\n responder = HttpResponder(handler)\n sock.settimeout(None)\n sock.setblocking(0)\n request_msg_reader = None\n force_close = True\n logging.debug('Connection from %s:%s', client_addr[0], client_addr[1])\n try:\n try:\n if server.using_ssl:\n sock.set_accept_state()\n try:\n http.Handshake(sock, self.WRITE_TIMEOUT)\n except http.HttpSessionHandshakeUnexpectedEOF:\n logging.debug('Unexpected EOF from %s:%s', client_addr[0], client_addr[1])\n return\n request_msg, request_msg_reader, force_close, response_msg = responder(compat.partial(self._ReadRequest, sock, self.READ_TIMEOUT))\n if response_msg:\n logging.info('%s:%s %s %s', client_addr[0], client_addr[1], request_msg.start_line, response_msg.start_line.code)\n self._SendResponse(sock, request_msg, response_msg, self.WRITE_TIMEOUT)\n finally:\n http.ShutdownConnection(sock, self.CLOSE_TIMEOUT, self.WRITE_TIMEOUT, request_msg_reader, force_close)\n sock.close()\n finally:\n logging.debug('Disconnected %s:%s', client_addr[0], client_addr[1])\n<|end_body_0|>\n\n<|body_start_1|>\n msg = http.HttpMessage()\n try:\n reader = _HttpClientToServerMessageReader(sock, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while reading request')\n except socket.error as err:\n raise http.HttpError('Error reading request: %s' % err)\n return (msg, reader)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n _HttpServerToClientMessageWriter(sock, req_msg, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while sending response')\n except socket.error as err:\n raise http.HttpError('Error sending response: %s' % err)\n<|end_body_2|>\n", "class_docstring": "Implements server side of HTTP. This class implements the server side of HTTP. It's based on code of Python's BaseHTTPServer, from both version 2.4 and 3k. It does not support non-ASCII character encodings. Keep-alive connections are not supported.", "class_name": "HttpServerRequestExecutor", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HttpServerRequestExecutor:\n \"\"\"Implements server side of HTTP. This class implements the server side of HTTP. It's based on code of Python's BaseHTTPServer, from both version 2.4 and 3k. It does not support non-ASCII character encodings. Keep-alive connections are not supported.\"\"\"\n\n def __init__(self, server, handler, sock, client_addr):\n \"\"\"Initializes this class.\"\"\"\n <|body_0|>\n\n def _ReadRequest(sock, timeout):\n \"\"\"Reads a request sent by client.\"\"\"\n <|body_1|>\n\n def _SendResponse(sock, req_msg, msg, timeout):\n \"\"\"Sends the response to the client.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n responder = HttpResponder(handler)\n sock.settimeout(None)\n sock.setblocking(0)\n request_msg_reader = None\n force_close = True\n logging.debug('Connection from %s:%s', client_addr[0], client_addr[1])\n try:\n try:\n if server.using_ssl:\n sock.set_accept_state()\n try:\n http.Handshake(sock, self.WRITE_TIMEOUT)\n except http.HttpSessionHandshakeUnexpectedEOF:\n logging.debug('Unexpected EOF from %s:%s', client_addr[0], client_addr[1])\n return\n request_msg, request_msg_reader, force_close, response_msg = responder(compat.partial(self._ReadRequest, sock, self.READ_TIMEOUT))\n if response_msg:\n logging.info('%s:%s %s %s', client_addr[0], client_addr[1], request_msg.start_line, response_msg.start_line.code)\n self._SendResponse(sock, request_msg, response_msg, self.WRITE_TIMEOUT)\n finally:\n http.ShutdownConnection(sock, self.CLOSE_TIMEOUT, self.WRITE_TIMEOUT, request_msg_reader, force_close)\n sock.close()\n finally:\n logging.debug('Disconnected %s:%s', client_addr[0], client_addr[1])\n<|end_body_0|>\n\n<|body_start_1|>\n msg = http.HttpMessage()\n try:\n reader = _HttpClientToServerMessageReader(sock, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while reading request')\n except socket.error as err:\n raise http.HttpError('Error reading request: %s' % err)\n return (msg, reader)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n _HttpServerToClientMessageWriter(sock, req_msg, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while sending response')\n except socket.error as err:\n raise http.HttpError('Error sending response: %s' % err)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000265", "length_bytes": 20673, "license_type": "permissive", "methods": [{"docstring": "Initializes this class.", "name": "__init__", "signature": "def __init__(self, server, handler, sock, client_addr)"}, {"docstring": "Reads a request sent by client.", "name": "_ReadRequest", "signature": "def _ReadRequest(sock, timeout)"}, {"docstring": "Sends the response to the client.", "name": "_SendResponse", "signature": "def _SendResponse(sock, req_msg, msg, timeout)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_006630", "prompt": "Implement the Python class `HttpServerRequestExecutor` described below.\n\nClass description:\nImplements server side of HTTP. This class implements the server side of HTTP. It's based on code of Python's BaseHTTPServer, from both version 2.4 and 3k. It does not support non-ASCII character encodings. Keep-alive connections are not supported.\n\nMethod signatures and docstrings:\n- def __init__(self, server, handler, sock, client_addr): Initializes this class.\n- def _ReadRequest(sock, timeout): Reads a request sent by client.\n- def _SendResponse(sock, req_msg, msg, timeout): Sends the response to the client.", "prompted_full_text": "Implement the Python class `HttpServerRequestExecutor` described below.\n\nClass description:\nImplements server side of HTTP. This class implements the server side of HTTP. It's based on code of Python's BaseHTTPServer, from both version 2.4 and 3k. It does not support non-ASCII character encodings. Keep-alive connections are not supported.\n\nMethod signatures and docstrings:\n- def __init__(self, server, handler, sock, client_addr): Initializes this class.\n- def _ReadRequest(sock, timeout): Reads a request sent by client.\n- def _SendResponse(sock, req_msg, msg, timeout): Sends the response to the client.\n\n<|skeleton|>\nclass HttpServerRequestExecutor:\n \"\"\"Implements server side of HTTP. This class implements the server side of HTTP. It's based on code of Python's BaseHTTPServer, from both version 2.4 and 3k. It does not support non-ASCII character encodings. Keep-alive connections are not supported.\"\"\"\n\n def __init__(self, server, handler, sock, client_addr):\n \"\"\"Initializes this class.\"\"\"\n <|body_0|>\n\n def _ReadRequest(sock, timeout):\n \"\"\"Reads a request sent by client.\"\"\"\n <|body_1|>\n\n def _SendResponse(sock, req_msg, msg, timeout):\n \"\"\"Sends the response to the client.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n responder = HttpResponder(handler)\n sock.settimeout(None)\n sock.setblocking(0)\n request_msg_reader = None\n force_close = True\n logging.debug('Connection from %s:%s', client_addr[0], client_addr[1])\n try:\n try:\n if server.using_ssl:\n sock.set_accept_state()\n try:\n http.Handshake(sock, self.WRITE_TIMEOUT)\n except http.HttpSessionHandshakeUnexpectedEOF:\n logging.debug('Unexpected EOF from %s:%s', client_addr[0], client_addr[1])\n return\n request_msg, request_msg_reader, force_close, response_msg = responder(compat.partial(self._ReadRequest, sock, self.READ_TIMEOUT))\n if response_msg:\n logging.info('%s:%s %s %s', client_addr[0], client_addr[1], request_msg.start_line, response_msg.start_line.code)\n self._SendResponse(sock, request_msg, response_msg, self.WRITE_TIMEOUT)\n finally:\n http.ShutdownConnection(sock, self.CLOSE_TIMEOUT, self.WRITE_TIMEOUT, request_msg_reader, force_close)\n sock.close()\n finally:\n logging.debug('Disconnected %s:%s', client_addr[0], client_addr[1])\n<|end_body_0|>\n\n<|body_start_1|>\n msg = http.HttpMessage()\n try:\n reader = _HttpClientToServerMessageReader(sock, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while reading request')\n except socket.error as err:\n raise http.HttpError('Error reading request: %s' % err)\n return (msg, reader)\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n _HttpServerToClientMessageWriter(sock, req_msg, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while sending response')\n except socket.error as err:\n raise http.HttpError('Error sending response: %s' % err)\n<|end_body_2|>\n", "revision_id": "456ea285a7583183c2c8e5bcffe9006ec8a9d658", "skeleton": "<|skeleton|>\nclass HttpServerRequestExecutor:\n \"\"\"Implements server side of HTTP. This class implements the server side of HTTP. It's based on code of Python's BaseHTTPServer, from both version 2.4 and 3k. It does not support non-ASCII character encodings. Keep-alive connections are not supported.\"\"\"\n\n def __init__(self, server, handler, sock, client_addr):\n \"\"\"Initializes this class.\"\"\"\n <|body_0|>\n\n def _ReadRequest(sock, timeout):\n \"\"\"Reads a request sent by client.\"\"\"\n <|body_1|>\n\n def _SendResponse(sock, req_msg, msg, timeout):\n \"\"\"Sends the response to the client.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HttpServerRequestExecutor:\n \"\"\"Implements server side of HTTP. This class implements the server side of HTTP. It's based on code of Python's BaseHTTPServer, from both version 2.4 and 3k. It does not support non-ASCII character encodings. Keep-alive connections are not supported.\"\"\"\n\n def __init__(self, server, handler, sock, client_addr):\n \"\"\"Initializes this class.\"\"\"\n responder = HttpResponder(handler)\n sock.settimeout(None)\n sock.setblocking(0)\n request_msg_reader = None\n force_close = True\n logging.debug('Connection from %s:%s', client_addr[0], client_addr[1])\n try:\n try:\n if server.using_ssl:\n sock.set_accept_state()\n try:\n http.Handshake(sock, self.WRITE_TIMEOUT)\n except http.HttpSessionHandshakeUnexpectedEOF:\n logging.debug('Unexpected EOF from %s:%s', client_addr[0], client_addr[1])\n return\n request_msg, request_msg_reader, force_close, response_msg = responder(compat.partial(self._ReadRequest, sock, self.READ_TIMEOUT))\n if response_msg:\n logging.info('%s:%s %s %s', client_addr[0], client_addr[1], request_msg.start_line, response_msg.start_line.code)\n self._SendResponse(sock, request_msg, response_msg, self.WRITE_TIMEOUT)\n finally:\n http.ShutdownConnection(sock, self.CLOSE_TIMEOUT, self.WRITE_TIMEOUT, request_msg_reader, force_close)\n sock.close()\n finally:\n logging.debug('Disconnected %s:%s', client_addr[0], client_addr[1])\n\n def _ReadRequest(sock, timeout):\n \"\"\"Reads a request sent by client.\"\"\"\n msg = http.HttpMessage()\n try:\n reader = _HttpClientToServerMessageReader(sock, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while reading request')\n except socket.error as err:\n raise http.HttpError('Error reading request: %s' % err)\n return (msg, reader)\n\n def _SendResponse(sock, req_msg, msg, timeout):\n \"\"\"Sends the response to the client.\"\"\"\n try:\n _HttpServerToClientMessageWriter(sock, req_msg, msg, timeout)\n except http.HttpSocketTimeout:\n raise http.HttpError('Timeout while sending response')\n except socket.error as err:\n raise http.HttpError('Error sending response: %s' % err)\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/http/server.py", "source_repo": "ganeti/ganeti", "split": "test", "star_events_count": 465} {"blob_id": "fe6cbb2ba88d15681b6bce84c007c6a09faf74e3", "bodies": ["if head == None or head.next == None:\n return head\ncur = self.reverseList(head.next)\nhead.next.next = head\nhead.next = None\nreturn cur", "pre = None\ncur = head\nwhile cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\nreturn pre"], "bodies_text": "<|body_start_0|>\n if head == None or head.next == None:\n return head\n cur = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return cur\n<|end_body_0|>\n\n<|body_start_1|>\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def reverseList(self, head: ListNode) -> ListNode:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def reverseList2(self, head: ListNode) -> ListNode:\n \"\"\"迭代\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if head == None or head.next == None:\n return head\n cur = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return cur\n<|end_body_0|>\n\n<|body_start_1|>\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000266", "length_bytes": 1932, "license_type": "no_license", "methods": [{"docstring": "递归", "name": "reverseList", "signature": "def reverseList(self, head: ListNode) -> ListNode"}, {"docstring": "迭代", "name": "reverseList2", "signature": "def reverseList2(self, head: ListNode) -> ListNode"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012833", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList(self, head: ListNode) -> ListNode: 递归\n- def reverseList2(self, head: ListNode) -> ListNode: 迭代", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def reverseList(self, head: ListNode) -> ListNode: 递归\n- def reverseList2(self, head: ListNode) -> ListNode: 迭代\n\n<|skeleton|>\nclass Solution:\n\n def reverseList(self, head: ListNode) -> ListNode:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def reverseList2(self, head: ListNode) -> ListNode:\n \"\"\"迭代\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if head == None or head.next == None:\n return head\n cur = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return cur\n<|end_body_0|>\n\n<|body_start_1|>\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n<|end_body_1|>\n", "revision_id": "13e7ec9fe7a92ab13b247bd4edeb1ada5de81a08", "skeleton": "<|skeleton|>\nclass Solution:\n\n def reverseList(self, head: ListNode) -> ListNode:\n \"\"\"递归\"\"\"\n <|body_0|>\n\n def reverseList2(self, head: ListNode) -> ListNode:\n \"\"\"迭代\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n \"\"\"递归\"\"\"\n if head == None or head.next == None:\n return head\n cur = self.reverseList(head.next)\n head.next.next = head\n head.next = None\n return cur\n\n def reverseList2(self, head: ListNode) -> ListNode:\n \"\"\"迭代\"\"\"\n pre = None\n cur = head\n while cur:\n tmp = cur.next\n cur.next = pre\n pre = cur\n cur = tmp\n return pre\n", "source": "the_stack_v2_python_sparse", "source_path": "Algorithms/206_Reverse_Linked_List/Reverse_Linked_List.py", "source_repo": "lirui-ML/my_leetcode", "split": "test", "star_events_count": 1} {"blob_id": "8ca0d795fce0a25276b3b0a9ea44e4f744877bff", "bodies": ["comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\ncomp.save()\nself.assertEqual(str(comp), f'Competition {comp.id}: Pending')", "comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes', is_active=True)\ncomp.save()\nself.assertEqual(str(comp), f'Competition {comp.id}: 5000')", "comp = Competition(tickets=5000, tickets_left=0, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\ncomp.save()\nself.assertEqual(str(comp), f'Competition {comp.id}: Ended')", "comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\ncomp.save()\nself.assertEqual(comp.tickets, 5000)\nself.assertEqual(comp.tickets_left, 5000)\nself.assertEqual(comp.question, 'Is this a test?')\nself.assertEqual(comp.answer_1, 'Yes')\nself.assertEqual(comp.answer_2, 'No')\nself.assertEqual(comp.answer_3, 'Maybe')\nself.assertEqual(comp.correct_answer, 'Yes')\nself.assertEqual(comp.is_active, False)"], "bodies_text": "<|body_start_0|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Pending')\n<|end_body_0|>\n\n<|body_start_1|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes', is_active=True)\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: 5000')\n<|end_body_1|>\n\n<|body_start_2|>\n comp = Competition(tickets=5000, tickets_left=0, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Ended')\n<|end_body_2|>\n\n<|body_start_3|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(comp.tickets, 5000)\n self.assertEqual(comp.tickets_left, 5000)\n self.assertEqual(comp.question, 'Is this a test?')\n self.assertEqual(comp.answer_1, 'Yes')\n self.assertEqual(comp.answer_2, 'No')\n self.assertEqual(comp.answer_3, 'Maybe')\n self.assertEqual(comp.correct_answer, 'Yes')\n self.assertEqual(comp.is_active, False)\n<|end_body_3|>\n", "class_docstring": "Test for Competition Model", "class_name": "ModelTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ModelTest:\n \"\"\"Test for Competition Model\"\"\"\n\n def test_str_comp_pending(self):\n \"\"\"Test str return for competition that is pending\"\"\"\n <|body_0|>\n\n def test_str_comp_active(self):\n \"\"\"Test str return for competition that is active\"\"\"\n <|body_1|>\n\n def test_str_comp_ended(self):\n \"\"\"Test str return for competition has ended\"\"\"\n <|body_2|>\n\n def test_create_competition(self):\n \"\"\"Test creation of a competition\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Pending')\n<|end_body_0|>\n\n<|body_start_1|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes', is_active=True)\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: 5000')\n<|end_body_1|>\n\n<|body_start_2|>\n comp = Competition(tickets=5000, tickets_left=0, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Ended')\n<|end_body_2|>\n\n<|body_start_3|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(comp.tickets, 5000)\n self.assertEqual(comp.tickets_left, 5000)\n self.assertEqual(comp.question, 'Is this a test?')\n self.assertEqual(comp.answer_1, 'Yes')\n self.assertEqual(comp.answer_2, 'No')\n self.assertEqual(comp.answer_3, 'Maybe')\n self.assertEqual(comp.correct_answer, 'Yes')\n self.assertEqual(comp.is_active, False)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000267", "length_bytes": 5758, "license_type": "no_license", "methods": [{"docstring": "Test str return for competition that is pending", "name": "test_str_comp_pending", "signature": "def test_str_comp_pending(self)"}, {"docstring": "Test str return for competition that is active", "name": "test_str_comp_active", "signature": "def test_str_comp_active(self)"}, {"docstring": "Test str return for competition has ended", "name": "test_str_comp_ended", "signature": "def test_str_comp_ended(self)"}, {"docstring": "Test creation of a competition", "name": "test_create_competition", "signature": "def test_create_competition(self)"}], "n_methods": 4, "prompt": "Implement the Python class `ModelTest` described below.\n\nClass description:\nTest for Competition Model\n\nMethod signatures and docstrings:\n- def test_str_comp_pending(self): Test str return for competition that is pending\n- def test_str_comp_active(self): Test str return for competition that is active\n- def test_str_comp_ended(self): Test str return for competition has ended\n- def test_create_competition(self): Test creation of a competition", "prompted_full_text": "Implement the Python class `ModelTest` described below.\n\nClass description:\nTest for Competition Model\n\nMethod signatures and docstrings:\n- def test_str_comp_pending(self): Test str return for competition that is pending\n- def test_str_comp_active(self): Test str return for competition that is active\n- def test_str_comp_ended(self): Test str return for competition has ended\n- def test_create_competition(self): Test creation of a competition\n\n<|skeleton|>\nclass ModelTest:\n \"\"\"Test for Competition Model\"\"\"\n\n def test_str_comp_pending(self):\n \"\"\"Test str return for competition that is pending\"\"\"\n <|body_0|>\n\n def test_str_comp_active(self):\n \"\"\"Test str return for competition that is active\"\"\"\n <|body_1|>\n\n def test_str_comp_ended(self):\n \"\"\"Test str return for competition has ended\"\"\"\n <|body_2|>\n\n def test_create_competition(self):\n \"\"\"Test creation of a competition\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Pending')\n<|end_body_0|>\n\n<|body_start_1|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes', is_active=True)\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: 5000')\n<|end_body_1|>\n\n<|body_start_2|>\n comp = Competition(tickets=5000, tickets_left=0, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Ended')\n<|end_body_2|>\n\n<|body_start_3|>\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(comp.tickets, 5000)\n self.assertEqual(comp.tickets_left, 5000)\n self.assertEqual(comp.question, 'Is this a test?')\n self.assertEqual(comp.answer_1, 'Yes')\n self.assertEqual(comp.answer_2, 'No')\n self.assertEqual(comp.answer_3, 'Maybe')\n self.assertEqual(comp.correct_answer, 'Yes')\n self.assertEqual(comp.is_active, False)\n<|end_body_3|>\n", "revision_id": "db719f814c9a08bf9cdb915a76b5665ed40657ff", "skeleton": "<|skeleton|>\nclass ModelTest:\n \"\"\"Test for Competition Model\"\"\"\n\n def test_str_comp_pending(self):\n \"\"\"Test str return for competition that is pending\"\"\"\n <|body_0|>\n\n def test_str_comp_active(self):\n \"\"\"Test str return for competition that is active\"\"\"\n <|body_1|>\n\n def test_str_comp_ended(self):\n \"\"\"Test str return for competition has ended\"\"\"\n <|body_2|>\n\n def test_create_competition(self):\n \"\"\"Test creation of a competition\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ModelTest:\n \"\"\"Test for Competition Model\"\"\"\n\n def test_str_comp_pending(self):\n \"\"\"Test str return for competition that is pending\"\"\"\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Pending')\n\n def test_str_comp_active(self):\n \"\"\"Test str return for competition that is active\"\"\"\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes', is_active=True)\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: 5000')\n\n def test_str_comp_ended(self):\n \"\"\"Test str return for competition has ended\"\"\"\n comp = Competition(tickets=5000, tickets_left=0, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(str(comp), f'Competition {comp.id}: Ended')\n\n def test_create_competition(self):\n \"\"\"Test creation of a competition\"\"\"\n comp = Competition(tickets=5000, tickets_left=5000, question='Is this a test?', answer_1='Yes', answer_2='No', answer_3='Maybe', correct_answer='Yes')\n comp.save()\n self.assertEqual(comp.tickets, 5000)\n self.assertEqual(comp.tickets_left, 5000)\n self.assertEqual(comp.question, 'Is this a test?')\n self.assertEqual(comp.answer_1, 'Yes')\n self.assertEqual(comp.answer_2, 'No')\n self.assertEqual(comp.answer_3, 'Maybe')\n self.assertEqual(comp.correct_answer, 'Yes')\n self.assertEqual(comp.is_active, False)\n", "source": "the_stack_v2_python_sparse", "source_path": "competition/tests.py", "source_repo": "msped/projectparts", "split": "test", "star_events_count": 0} {"blob_id": "a73cda9db45de5a687378be465f6ff8bd68c757d", "bodies": ["self.d = dict()\nself.stk = []\nself.length = capacity", "if key in self.d:\n ans = self.d[key]\n self.stk.remove(key)\n self.stk.append(key)\n return ans\nelse:\n return -1", "if key not in self.d:\n if len(self.d) == self.length:\n temp = self.stk.pop(0)\n del self.d[temp]\n self.d[key] = value\n self.stk.append(key)\nelse:\n self.d[key] = value\n self.stk.remove(key)\n self.stk.append(key)"], "bodies_text": "<|body_start_0|>\n self.d = dict()\n self.stk = []\n self.length = capacity\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.d:\n ans = self.d[key]\n self.stk.remove(key)\n self.stk.append(key)\n return ans\n else:\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if key not in self.d:\n if len(self.d) == self.length:\n temp = self.stk.pop(0)\n del self.d[temp]\n self.d[key] = value\n self.stk.append(key)\n else:\n self.d[key] = value\n self.stk.remove(key)\n self.stk.append(key)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "LRUCache", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = dict()\n self.stk = []\n self.length = capacity\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.d:\n ans = self.d[key]\n self.stk.remove(key)\n self.stk.append(key)\n return ans\n else:\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if key not in self.d:\n if len(self.d) == self.length:\n temp = self.stk.pop(0)\n del self.d[temp]\n self.d[key] = value\n self.stk.append(key)\n else:\n self.d[key] = value\n self.stk.remove(key)\n self.stk.append(key)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000268", "length_bytes": 1062, "license_type": "no_license", "methods": [{"docstring": ":type capacity: int", "name": "__init__", "signature": "def __init__(self, capacity)"}, {"docstring": ":type key: int :rtype: int", "name": "get", "signature": "def get(self, key)"}, {"docstring": ":type key: int :type value: int :rtype: void", "name": "put", "signature": "def put(self, key, value)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_000276", "prompt": "Implement the Python class `LRUCache` described below.\n\nClass description:\nImplement the LRUCache class.\n\nMethod signatures and docstrings:\n- def __init__(self, capacity): :type capacity: int\n- def get(self, key): :type key: int :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: void", "prompted_full_text": "Implement the Python class `LRUCache` described below.\n\nClass description:\nImplement the LRUCache class.\n\nMethod signatures and docstrings:\n- def __init__(self, capacity): :type capacity: int\n- def get(self, key): :type key: int :rtype: int\n- def put(self, key, value): :type key: int :type value: int :rtype: void\n\n<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.d = dict()\n self.stk = []\n self.length = capacity\n<|end_body_0|>\n\n<|body_start_1|>\n if key in self.d:\n ans = self.d[key]\n self.stk.remove(key)\n self.stk.append(key)\n return ans\n else:\n return -1\n<|end_body_1|>\n\n<|body_start_2|>\n if key not in self.d:\n if len(self.d) == self.length:\n temp = self.stk.pop(0)\n del self.d[temp]\n self.d[key] = value\n self.stk.append(key)\n else:\n self.d[key] = value\n self.stk.remove(key)\n self.stk.append(key)\n<|end_body_2|>\n", "revision_id": "d8c3be5937c54b740ebccd0b373a67ece46773f3", "skeleton": "<|skeleton|>\nclass LRUCache:\n\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n <|body_0|>\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n <|body_1|>\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LRUCache:\n def __init__(self, capacity):\n \"\"\":type capacity: int\"\"\"\n self.d = dict()\n self.stk = []\n self.length = capacity\n\n def get(self, key):\n \"\"\":type key: int :rtype: int\"\"\"\n if key in self.d:\n ans = self.d[key]\n self.stk.remove(key)\n self.stk.append(key)\n return ans\n else:\n return -1\n\n def put(self, key, value):\n \"\"\":type key: int :type value: int :rtype: void\"\"\"\n if key not in self.d:\n if len(self.d) == self.length:\n temp = self.stk.pop(0)\n del self.d[temp]\n self.d[key] = value\n self.stk.append(key)\n else:\n self.d[key] = value\n self.stk.remove(key)\n self.stk.append(key)\n", "source": "the_stack_v2_python_sparse", "source_path": "LRU Cache.py", "source_repo": "shank54/Leetcode", "split": "test", "star_events_count": 0} {"blob_id": "c8ec9d0f6b13b349ed565761a9edee521b00c1b8", "bodies": ["answer = ''\nfor s in strs:\n answer += str(len(s)) + ':' + s\nreturn answer", "strs = []\nwhile s:\n i = s.find(':')\n length = int(s[:i])\n s = s[i + 1:]\n strs.append(s[:length])\n s = s[length:]\nreturn strs"], "bodies_text": "<|body_start_0|>\n answer = ''\n for s in strs:\n answer += str(len(s)) + ':' + s\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n strs = []\n while s:\n i = s.find(':')\n length = int(s[:i])\n s = s[i + 1:]\n strs.append(s[:length])\n s = s[length:]\n return strs\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n answer = ''\n for s in strs:\n answer += str(len(s)) + ':' + s\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n strs = []\n while s:\n i = s.find(':')\n length = int(s[:i])\n s = s[i + 1:]\n strs.append(s[:length])\n s = s[length:]\n return strs\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000269", "length_bytes": 2430, "license_type": "no_license", "methods": [{"docstring": "Encodes a list of strings to a single string. :type strs: List[str] :rtype: str", "name": "encode", "signature": "def encode(self, strs)"}, {"docstring": "Decodes a single string to a list of strings. :type s: str :rtype: List[str]", "name": "decode", "signature": "def decode(self, s)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009880", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def encode(self, strs): Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\n- def decode(self, s): Decodes a single string to a list of strings. :type s: str :rtype: List[str]", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def encode(self, strs): Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\n- def decode(self, s): Decodes a single string to a list of strings. :type s: str :rtype: List[str]\n\n<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n answer = ''\n for s in strs:\n answer += str(len(s)) + ':' + s\n return answer\n<|end_body_0|>\n\n<|body_start_1|>\n strs = []\n while s:\n i = s.find(':')\n length = int(s[:i])\n s = s[i + 1:]\n strs.append(s[:length])\n s = s[length:]\n return strs\n<|end_body_1|>\n", "revision_id": "eb6b11f97a022b66716cb3890cc56c58f62e8aa4", "skeleton": "<|skeleton|>\nclass Codec:\n\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n <|body_0|>\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def encode(self, strs):\n \"\"\"Encodes a list of strings to a single string. :type strs: List[str] :rtype: str\"\"\"\n answer = ''\n for s in strs:\n answer += str(len(s)) + ':' + s\n return answer\n\n def decode(self, s):\n \"\"\"Decodes a single string to a list of strings. :type s: str :rtype: List[str]\"\"\"\n strs = []\n while s:\n i = s.find(':')\n length = int(s[:i])\n s = s[i + 1:]\n strs.append(s[:length])\n s = s[length:]\n return strs\n", "source": "the_stack_v2_python_sparse", "source_path": "problemSets/top75/271.py", "source_repo": "Th3Lourde/l33tcode", "split": "test", "star_events_count": 0} {"blob_id": "8abd285109d8349c758c7a75eb6690ec3132f4d6", "bodies": ["obj.save()\nfrom celery_tasks.html.tasks import generate_static_list_search_html\ngenerate_static_list_search_html.delay()", "obj.delete()\nfrom celery_tasks.html.tasks import generate_static_list_search_html\ngenerate_static_list_search_html.delay()"], "bodies_text": "<|body_start_0|>\n obj.save()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n<|end_body_0|>\n\n<|body_start_1|>\n obj.delete()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n<|end_body_1|>\n", "class_docstring": "商品类别模型站点管理类", "class_name": "GoodsCategoryAdmin", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GoodsCategoryAdmin:\n \"\"\"商品类别模型站点管理类\"\"\"\n\n def save_model(self, request, obj, form, change):\n \"\"\"当点击admin中的保存按钮时会来调用此方法 :param request:保存时本次请求对象 :param obj:本次要保存的模型对象 :param form:admin中的表单 :param change: 是否更改 bool\"\"\"\n <|body_0|>\n\n def delete_model(self, request, obj):\n \"\"\"当点击admin中的删除按钮时会来调此方法 :param request: 删除时本次请求对象 :param obj: 本次要删除的对象\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obj.save()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n<|end_body_0|>\n\n<|body_start_1|>\n obj.delete()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000270", "length_bytes": 2663, "license_type": "no_license", "methods": [{"docstring": "当点击admin中的保存按钮时会来调用此方法 :param request:保存时本次请求对象 :param obj:本次要保存的模型对象 :param form:admin中的表单 :param change: 是否更改 bool", "name": "save_model", "signature": "def save_model(self, request, obj, form, change)"}, {"docstring": "当点击admin中的删除按钮时会来调此方法 :param request: 删除时本次请求对象 :param obj: 本次要删除的对象", "name": "delete_model", "signature": "def delete_model(self, request, obj)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032742", "prompt": "Implement the Python class `GoodsCategoryAdmin` described below.\n\nClass description:\n商品类别模型站点管理类\n\nMethod signatures and docstrings:\n- def save_model(self, request, obj, form, change): 当点击admin中的保存按钮时会来调用此方法 :param request:保存时本次请求对象 :param obj:本次要保存的模型对象 :param form:admin中的表单 :param change: 是否更改 bool\n- def delete_model(self, request, obj): 当点击admin中的删除按钮时会来调此方法 :param request: 删除时本次请求对象 :param obj: 本次要删除的对象", "prompted_full_text": "Implement the Python class `GoodsCategoryAdmin` described below.\n\nClass description:\n商品类别模型站点管理类\n\nMethod signatures and docstrings:\n- def save_model(self, request, obj, form, change): 当点击admin中的保存按钮时会来调用此方法 :param request:保存时本次请求对象 :param obj:本次要保存的模型对象 :param form:admin中的表单 :param change: 是否更改 bool\n- def delete_model(self, request, obj): 当点击admin中的删除按钮时会来调此方法 :param request: 删除时本次请求对象 :param obj: 本次要删除的对象\n\n<|skeleton|>\nclass GoodsCategoryAdmin:\n \"\"\"商品类别模型站点管理类\"\"\"\n\n def save_model(self, request, obj, form, change):\n \"\"\"当点击admin中的保存按钮时会来调用此方法 :param request:保存时本次请求对象 :param obj:本次要保存的模型对象 :param form:admin中的表单 :param change: 是否更改 bool\"\"\"\n <|body_0|>\n\n def delete_model(self, request, obj):\n \"\"\"当点击admin中的删除按钮时会来调此方法 :param request: 删除时本次请求对象 :param obj: 本次要删除的对象\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n obj.save()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n<|end_body_0|>\n\n<|body_start_1|>\n obj.delete()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n<|end_body_1|>\n", "revision_id": "61798f2c3624bfde540cfb7469d42564ffe674a6", "skeleton": "<|skeleton|>\nclass GoodsCategoryAdmin:\n \"\"\"商品类别模型站点管理类\"\"\"\n\n def save_model(self, request, obj, form, change):\n \"\"\"当点击admin中的保存按钮时会来调用此方法 :param request:保存时本次请求对象 :param obj:本次要保存的模型对象 :param form:admin中的表单 :param change: 是否更改 bool\"\"\"\n <|body_0|>\n\n def delete_model(self, request, obj):\n \"\"\"当点击admin中的删除按钮时会来调此方法 :param request: 删除时本次请求对象 :param obj: 本次要删除的对象\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GoodsCategoryAdmin:\n \"\"\"商品类别模型站点管理类\"\"\"\n\n def save_model(self, request, obj, form, change):\n \"\"\"当点击admin中的保存按钮时会来调用此方法 :param request:保存时本次请求对象 :param obj:本次要保存的模型对象 :param form:admin中的表单 :param change: 是否更改 bool\"\"\"\n obj.save()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n\n def delete_model(self, request, obj):\n \"\"\"当点击admin中的删除按钮时会来调此方法 :param request: 删除时本次请求对象 :param obj: 本次要删除的对象\"\"\"\n obj.delete()\n from celery_tasks.html.tasks import generate_static_list_search_html\n generate_static_list_search_html.delay()\n", "source": "the_stack_v2_python_sparse", "source_path": "meiduo_mall/meiduo_mall/apps/goods/admin.py", "source_repo": "MEGALO-JOE/meiduo", "split": "test", "star_events_count": 0} {"blob_id": "06bdb34f3ffc77a71db13eed330ae21a129c0a1f", "bodies": ["self.body_spec = body_spec\nself.nn_spec = nn_spec\nself.body_decoder = BodyDecoder(body_spec)\nself.brain_decoder = NeuralNetworkDecoder(nn_spec, body_spec)", "obj = yaml.load(stream)\nrobot = Robot()\nrobot.id = obj.get('id', 0)\nrobot.body.CopyFrom(self.body_decoder.decode(obj))\nrobot.brain.CopyFrom(self.brain_decoder.decode(obj))\nreturn robot"], "bodies_text": "<|body_start_0|>\n self.body_spec = body_spec\n self.nn_spec = nn_spec\n self.body_decoder = BodyDecoder(body_spec)\n self.brain_decoder = NeuralNetworkDecoder(nn_spec, body_spec)\n<|end_body_0|>\n\n<|body_start_1|>\n obj = yaml.load(stream)\n robot = Robot()\n robot.id = obj.get('id', 0)\n robot.body.CopyFrom(self.body_decoder.decode(obj))\n robot.brain.CopyFrom(self.brain_decoder.decode(obj))\n return robot\n<|end_body_1|>\n", "class_docstring": "Sample converter creates a Robot protobuf message from a YAML stream and a body / neural net spec.", "class_name": "YamlToRobot", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass YamlToRobot:\n \"\"\"Sample converter creates a Robot protobuf message from a YAML stream and a body / neural net spec.\"\"\"\n\n def __init__(self, body_spec, nn_spec):\n \"\"\":param body_spec: :type body_spec: BodyImplementation :param nn_spec: :type nn_spec: NeuralNetImplementation\"\"\"\n <|body_0|>\n\n def get_protobuf(self, stream):\n \"\"\"Returns a protobuf `Robot` for the given stream. :param stream: :type stream: stream :return: :rtype: Robot\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.body_spec = body_spec\n self.nn_spec = nn_spec\n self.body_decoder = BodyDecoder(body_spec)\n self.brain_decoder = NeuralNetworkDecoder(nn_spec, body_spec)\n<|end_body_0|>\n\n<|body_start_1|>\n obj = yaml.load(stream)\n robot = Robot()\n robot.id = obj.get('id', 0)\n robot.body.CopyFrom(self.body_decoder.decode(obj))\n robot.brain.CopyFrom(self.brain_decoder.decode(obj))\n return robot\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000271", "length_bytes": 3202, "license_type": "permissive", "methods": [{"docstring": ":param body_spec: :type body_spec: BodyImplementation :param nn_spec: :type nn_spec: NeuralNetImplementation", "name": "__init__", "signature": "def __init__(self, body_spec, nn_spec)"}, {"docstring": "Returns a protobuf `Robot` for the given stream. :param stream: :type stream: stream :return: :rtype: Robot", "name": "get_protobuf", "signature": "def get_protobuf(self, stream)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_002831", "prompt": "Implement the Python class `YamlToRobot` described below.\n\nClass description:\nSample converter creates a Robot protobuf message from a YAML stream and a body / neural net spec.\n\nMethod signatures and docstrings:\n- def __init__(self, body_spec, nn_spec): :param body_spec: :type body_spec: BodyImplementation :param nn_spec: :type nn_spec: NeuralNetImplementation\n- def get_protobuf(self, stream): Returns a protobuf `Robot` for the given stream. :param stream: :type stream: stream :return: :rtype: Robot", "prompted_full_text": "Implement the Python class `YamlToRobot` described below.\n\nClass description:\nSample converter creates a Robot protobuf message from a YAML stream and a body / neural net spec.\n\nMethod signatures and docstrings:\n- def __init__(self, body_spec, nn_spec): :param body_spec: :type body_spec: BodyImplementation :param nn_spec: :type nn_spec: NeuralNetImplementation\n- def get_protobuf(self, stream): Returns a protobuf `Robot` for the given stream. :param stream: :type stream: stream :return: :rtype: Robot\n\n<|skeleton|>\nclass YamlToRobot:\n \"\"\"Sample converter creates a Robot protobuf message from a YAML stream and a body / neural net spec.\"\"\"\n\n def __init__(self, body_spec, nn_spec):\n \"\"\":param body_spec: :type body_spec: BodyImplementation :param nn_spec: :type nn_spec: NeuralNetImplementation\"\"\"\n <|body_0|>\n\n def get_protobuf(self, stream):\n \"\"\"Returns a protobuf `Robot` for the given stream. :param stream: :type stream: stream :return: :rtype: Robot\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.body_spec = body_spec\n self.nn_spec = nn_spec\n self.body_decoder = BodyDecoder(body_spec)\n self.brain_decoder = NeuralNetworkDecoder(nn_spec, body_spec)\n<|end_body_0|>\n\n<|body_start_1|>\n obj = yaml.load(stream)\n robot = Robot()\n robot.id = obj.get('id', 0)\n robot.body.CopyFrom(self.body_decoder.decode(obj))\n robot.brain.CopyFrom(self.brain_decoder.decode(obj))\n return robot\n<|end_body_1|>\n", "revision_id": "70e65320a28fe04e121145b2cdde289d3052728a", "skeleton": "<|skeleton|>\nclass YamlToRobot:\n \"\"\"Sample converter creates a Robot protobuf message from a YAML stream and a body / neural net spec.\"\"\"\n\n def __init__(self, body_spec, nn_spec):\n \"\"\":param body_spec: :type body_spec: BodyImplementation :param nn_spec: :type nn_spec: NeuralNetImplementation\"\"\"\n <|body_0|>\n\n def get_protobuf(self, stream):\n \"\"\"Returns a protobuf `Robot` for the given stream. :param stream: :type stream: stream :return: :rtype: Robot\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class YamlToRobot:\n \"\"\"Sample converter creates a Robot protobuf message from a YAML stream and a body / neural net spec.\"\"\"\n\n def __init__(self, body_spec, nn_spec):\n \"\"\":param body_spec: :type body_spec: BodyImplementation :param nn_spec: :type nn_spec: NeuralNetImplementation\"\"\"\n self.body_spec = body_spec\n self.nn_spec = nn_spec\n self.body_decoder = BodyDecoder(body_spec)\n self.brain_decoder = NeuralNetworkDecoder(nn_spec, body_spec)\n\n def get_protobuf(self, stream):\n \"\"\"Returns a protobuf `Robot` for the given stream. :param stream: :type stream: stream :return: :rtype: Robot\"\"\"\n obj = yaml.load(stream)\n robot = Robot()\n robot.id = obj.get('id', 0)\n robot.body.CopyFrom(self.body_decoder.decode(obj))\n robot.brain.CopyFrom(self.brain_decoder.decode(obj))\n return robot\n", "source": "the_stack_v2_python_sparse", "source_path": "revolve/convert/yaml.py", "source_repo": "ElteHupkes/revolve", "split": "test", "star_events_count": 0} {"blob_id": "b382127586643eac712312893df81da0979865f8", "bodies": ["config_dict = self.config_dict\nif len(config_dict['c']) > 1:\n config_dict['c'] = config_dict['c'][0]\ntry:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=y_errors, xerr=x_errors, **config_dict)\nexcept (IndexError, TypeError):\n return self.errorbar_grid_list(grid_list=grid)", "if len(grid_list) == 0:\n return\ncolor = itertools.cycle(self.config_dict['c'])\nconfig_dict = self.config_dict\nconfig_dict.pop('c')\ntry:\n for grid in grid_list:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), c=next(color), **config_dict)\nexcept IndexError:\n return None", "config_dict = self.config_dict\nconfig_dict.pop('c')\nplt.scatter(y=grid[:, 0], x=grid[:, 1], c=color_array, cmap=cmap)\nplt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), zorder=0.0, **self.config_dict)"], "bodies_text": "<|body_start_0|>\n config_dict = self.config_dict\n if len(config_dict['c']) > 1:\n config_dict['c'] = config_dict['c'][0]\n try:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=y_errors, xerr=x_errors, **config_dict)\n except (IndexError, TypeError):\n return self.errorbar_grid_list(grid_list=grid)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(grid_list) == 0:\n return\n color = itertools.cycle(self.config_dict['c'])\n config_dict = self.config_dict\n config_dict.pop('c')\n try:\n for grid in grid_list:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), c=next(color), **config_dict)\n except IndexError:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n config_dict = self.config_dict\n config_dict.pop('c')\n plt.scatter(y=grid[:, 0], x=grid[:, 1], c=color_array, cmap=cmap)\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), zorder=0.0, **self.config_dict)\n<|end_body_2|>\n", "class_docstring": "Plots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D (y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with varying colors. This object wraps the following Matplotlib methods: - plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html Parameters ---------- colors : [str] The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a list of colors can be specified which the plot cycles through.", "class_name": "GridErrorbar", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GridErrorbar:\n \"\"\"Plots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D (y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with varying colors. This object wraps the following Matplotlib methods: - plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html Parameters ---------- colors : [str] The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a list of colors can be specified which the plot cycles through.\"\"\"\n\n def errorbar_grid(self, grid: Union[np.ndarray, Grid2D], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. y_errors The y values of the error on every point of the grid that is plotted (e.g. vertically). x_errors The x values of the error on every point of the grid that is plotted (e.g. horizontally).\"\"\"\n <|body_0|>\n\n def errorbar_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. This method colors each grid in each entry of the list the same, so that the different grids are visible in the plot. Parameters ---------- grid_list The list of grids of (y,x) coordinates that are plotted.\"\"\"\n <|body_1|>\n\n def errorbar_grid_colored(self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str, y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The method colors the errorbared grid according to an input ndarray of color values, using an input colormap. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. color_array : ndarray The array of RGB color values used to color the grid. cmap The Matplotlib colormap used for the grid point coloring.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n config_dict = self.config_dict\n if len(config_dict['c']) > 1:\n config_dict['c'] = config_dict['c'][0]\n try:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=y_errors, xerr=x_errors, **config_dict)\n except (IndexError, TypeError):\n return self.errorbar_grid_list(grid_list=grid)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(grid_list) == 0:\n return\n color = itertools.cycle(self.config_dict['c'])\n config_dict = self.config_dict\n config_dict.pop('c')\n try:\n for grid in grid_list:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), c=next(color), **config_dict)\n except IndexError:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n config_dict = self.config_dict\n config_dict.pop('c')\n plt.scatter(y=grid[:, 0], x=grid[:, 1], c=color_array, cmap=cmap)\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), zorder=0.0, **self.config_dict)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000272", "length_bytes": 4685, "license_type": "permissive", "methods": [{"docstring": "Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. y_errors The y values of the error on every point of the grid that is plotted (e.g. vertically). x_errors The x values of the error on every point of the grid that is plotted (e.g. horizontally).", "name": "errorbar_grid", "signature": "def errorbar_grid(self, grid: Union[np.ndarray, Grid2D], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None)"}, {"docstring": "Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. This method colors each grid in each entry of the list the same, so that the different grids are visible in the plot. Parameters ---------- grid_list The list of grids of (y,x) coordinates that are plotted.", "name": "errorbar_grid_list", "signature": "def errorbar_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None)"}, {"docstring": "Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The method colors the errorbared grid according to an input ndarray of color values, using an input colormap. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. color_array : ndarray The array of RGB color values used to color the grid. cmap The Matplotlib colormap used for the grid point coloring.", "name": "errorbar_grid_colored", "signature": "def errorbar_grid_colored(self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str, y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_036509", "prompt": "Implement the Python class `GridErrorbar` described below.\n\nClass description:\nPlots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D (y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with varying colors. This object wraps the following Matplotlib methods: - plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html Parameters ---------- colors : [str] The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a list of colors can be specified which the plot cycles through.\n\nMethod signatures and docstrings:\n- def errorbar_grid(self, grid: Union[np.ndarray, Grid2D], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None): Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. y_errors The y values of the error on every point of the grid that is plotted (e.g. vertically). x_errors The x values of the error on every point of the grid that is plotted (e.g. horizontally).\n- def errorbar_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None): Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. This method colors each grid in each entry of the list the same, so that the different grids are visible in the plot. Parameters ---------- grid_list The list of grids of (y,x) coordinates that are plotted.\n- def errorbar_grid_colored(self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str, y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None): Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The method colors the errorbared grid according to an input ndarray of color values, using an input colormap. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. color_array : ndarray The array of RGB color values used to color the grid. cmap The Matplotlib colormap used for the grid point coloring.", "prompted_full_text": "Implement the Python class `GridErrorbar` described below.\n\nClass description:\nPlots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D (y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with varying colors. This object wraps the following Matplotlib methods: - plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html Parameters ---------- colors : [str] The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a list of colors can be specified which the plot cycles through.\n\nMethod signatures and docstrings:\n- def errorbar_grid(self, grid: Union[np.ndarray, Grid2D], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None): Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. y_errors The y values of the error on every point of the grid that is plotted (e.g. vertically). x_errors The x values of the error on every point of the grid that is plotted (e.g. horizontally).\n- def errorbar_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None): Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. This method colors each grid in each entry of the list the same, so that the different grids are visible in the plot. Parameters ---------- grid_list The list of grids of (y,x) coordinates that are plotted.\n- def errorbar_grid_colored(self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str, y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None): Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The method colors the errorbared grid according to an input ndarray of color values, using an input colormap. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. color_array : ndarray The array of RGB color values used to color the grid. cmap The Matplotlib colormap used for the grid point coloring.\n\n<|skeleton|>\nclass GridErrorbar:\n \"\"\"Plots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D (y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with varying colors. This object wraps the following Matplotlib methods: - plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html Parameters ---------- colors : [str] The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a list of colors can be specified which the plot cycles through.\"\"\"\n\n def errorbar_grid(self, grid: Union[np.ndarray, Grid2D], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. y_errors The y values of the error on every point of the grid that is plotted (e.g. vertically). x_errors The x values of the error on every point of the grid that is plotted (e.g. horizontally).\"\"\"\n <|body_0|>\n\n def errorbar_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. This method colors each grid in each entry of the list the same, so that the different grids are visible in the plot. Parameters ---------- grid_list The list of grids of (y,x) coordinates that are plotted.\"\"\"\n <|body_1|>\n\n def errorbar_grid_colored(self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str, y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The method colors the errorbared grid according to an input ndarray of color values, using an input colormap. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. color_array : ndarray The array of RGB color values used to color the grid. cmap The Matplotlib colormap used for the grid point coloring.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n config_dict = self.config_dict\n if len(config_dict['c']) > 1:\n config_dict['c'] = config_dict['c'][0]\n try:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=y_errors, xerr=x_errors, **config_dict)\n except (IndexError, TypeError):\n return self.errorbar_grid_list(grid_list=grid)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(grid_list) == 0:\n return\n color = itertools.cycle(self.config_dict['c'])\n config_dict = self.config_dict\n config_dict.pop('c')\n try:\n for grid in grid_list:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), c=next(color), **config_dict)\n except IndexError:\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n config_dict = self.config_dict\n config_dict.pop('c')\n plt.scatter(y=grid[:, 0], x=grid[:, 1], c=color_array, cmap=cmap)\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), zorder=0.0, **self.config_dict)\n<|end_body_2|>\n", "revision_id": "6639dd86d21ea28e942155753ec556752735b4e4", "skeleton": "<|skeleton|>\nclass GridErrorbar:\n \"\"\"Plots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D (y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with varying colors. This object wraps the following Matplotlib methods: - plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html Parameters ---------- colors : [str] The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a list of colors can be specified which the plot cycles through.\"\"\"\n\n def errorbar_grid(self, grid: Union[np.ndarray, Grid2D], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. y_errors The y values of the error on every point of the grid that is plotted (e.g. vertically). x_errors The x values of the error on every point of the grid that is plotted (e.g. horizontally).\"\"\"\n <|body_0|>\n\n def errorbar_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. This method colors each grid in each entry of the list the same, so that the different grids are visible in the plot. Parameters ---------- grid_list The list of grids of (y,x) coordinates that are plotted.\"\"\"\n <|body_1|>\n\n def errorbar_grid_colored(self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str, y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The method colors the errorbared grid according to an input ndarray of color values, using an input colormap. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. color_array : ndarray The array of RGB color values used to color the grid. cmap The Matplotlib colormap used for the grid point coloring.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GridErrorbar:\n \"\"\"Plots an input set of grid points with 2D errors, for example (y,x) coordinates or data structures representing 2D (y,x) coordinates like a `Grid2D` or `Grid2DIrregular`. Multiple lists of (y,x) coordinates are plotted with varying colors. This object wraps the following Matplotlib methods: - plt.errorbar: https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.errorbar.html Parameters ---------- colors : [str] The color or list of colors that the grid is plotted using. For plotting indexes or a grid list, a list of colors can be specified which the plot cycles through.\"\"\"\n\n def errorbar_grid(self, grid: Union[np.ndarray, Grid2D], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. y_errors The y values of the error on every point of the grid that is plotted (e.g. vertically). x_errors The x values of the error on every point of the grid that is plotted (e.g. horizontally).\"\"\"\n config_dict = self.config_dict\n if len(config_dict['c']) > 1:\n config_dict['c'] = config_dict['c'][0]\n try:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=y_errors, xerr=x_errors, **config_dict)\n except (IndexError, TypeError):\n return self.errorbar_grid_list(grid_list=grid)\n\n def errorbar_grid_list(self, grid_list: Union[List[Grid2D], List[Grid2DIrregular]], y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input list of grids of (y,x) coordinates using the matplotlib method `plt.errorbar`. The (y,x) coordinates are plotted as dots, with a line / cross for its errors. This method colors each grid in each entry of the list the same, so that the different grids are visible in the plot. Parameters ---------- grid_list The list of grids of (y,x) coordinates that are plotted.\"\"\"\n if len(grid_list) == 0:\n return\n color = itertools.cycle(self.config_dict['c'])\n config_dict = self.config_dict\n config_dict.pop('c')\n try:\n for grid in grid_list:\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), c=next(color), **config_dict)\n except IndexError:\n return None\n\n def errorbar_grid_colored(self, grid: Union[np.ndarray, Grid2D], color_array: np.ndarray, cmap: str, y_errors: Optional[Union[np.ndarray, List]]=None, x_errors: Optional[Union[np.ndarray, List]]=None):\n \"\"\"Plot an input grid of (y,x) coordinates using the matplotlib method `plt.errorbar`. The method colors the errorbared grid according to an input ndarray of color values, using an input colormap. Parameters ---------- grid : Grid2D The grid of (y,x) coordinates that is plotted. color_array : ndarray The array of RGB color values used to color the grid. cmap The Matplotlib colormap used for the grid point coloring.\"\"\"\n config_dict = self.config_dict\n config_dict.pop('c')\n plt.scatter(y=grid[:, 0], x=grid[:, 1], c=color_array, cmap=cmap)\n plt.errorbar(y=grid[:, 0], x=grid[:, 1], yerr=np.asarray(y_errors), xerr=np.asarray(x_errors), zorder=0.0, **self.config_dict)\n", "source": "the_stack_v2_python_sparse", "source_path": "autoarray/plot/wrap/two_d/grid_errorbar.py", "source_repo": "Jammy2211/PyAutoArray", "split": "test", "star_events_count": 6} {"blob_id": "d0d165efcf9f7fdd5e32e3c43f664f0a2aefa720", "bodies": ["encoded, seq_length = self.encoder(inputs, seq_length)\nprobs = self.decoder(encoded, seq_length)\nreturn probs", "with tf.name_scope('compute_loss'):\n iw = float(self.conf['insertion_weight'])\n up = float(self.conf['upper_prob'])\n lp = float(self.conf['lower_prob'])\n iloss = iw * tf.reduce_mean(tf.reduce_sum((1 - targets) * tf.maximum(probs - lp, 0) ** 2, 1))\n dloss = tf.reduce_mean(tf.reduce_sum(targets * tf.maximum(up - probs, 0) ** 2, 1))\n loss = dloss + iloss\nreturn loss", "with tf.variable_scope('encoder'):\n encoded = tf.identity(features, 'features')\n seq_length = tf.identity(seq_length, 'input_seq_length')\n for l in range(int(self.conf['numlayers_encoder'])):\n with tf.variable_scope('layer%d' % l):\n num_units = int(self.conf['numunits_encoder'])\n fw = tf.contrib.rnn.GRUCell(num_units)\n bw = tf.contrib.rnn.GRUCell(num_units)\n encoded, _ = tf.nn.bidirectional_dynamic_rnn(fw, bw, encoded, dtype=tf.float32, sequence_length=seq_length)\n encoded = tf.concat(encoded, 2)\n if l != int(self.conf['numlayers_encoder']) - 1:\n with tf.name_scope('sub-sample'):\n encoded = encoded[:, ::int(self.conf['subsample'])]\n seq_length = tf.to_int32(tf.ceil(tf.to_float(seq_length) / float(self.conf['subsample'])))\n encoded = tf.identity(encoded, 'encoded')\n seq_length = tf.identity(seq_length, 'output_seq_length')\nreturn (encoded, seq_length)", "with tf.variable_scope('decoder'):\n mask = tf.sequence_mask(seq_length, tf.shape(encoded)[1])\n mask = tf.tile(mask[:, :, tf.newaxis], [1, 1, encoded.shape[-1].value])\n encoded = tf.where(mask, encoded, tf.ones_like(encoded) * encoded.dtype.min)\n outputs = tf.reduce_max(encoded, 1)\n outputs = tf.layers.dense(outputs, int(self.conf['numunits_decoder']), tf.nn.relu)\n outputs = tf.layers.dense(outputs, self.coder.numlabels, tf.nn.sigmoid)\n return outputs"], "bodies_text": "<|body_start_0|>\n encoded, seq_length = self.encoder(inputs, seq_length)\n probs = self.decoder(encoded, seq_length)\n return probs\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.name_scope('compute_loss'):\n iw = float(self.conf['insertion_weight'])\n up = float(self.conf['upper_prob'])\n lp = float(self.conf['lower_prob'])\n iloss = iw * tf.reduce_mean(tf.reduce_sum((1 - targets) * tf.maximum(probs - lp, 0) ** 2, 1))\n dloss = tf.reduce_mean(tf.reduce_sum(targets * tf.maximum(up - probs, 0) ** 2, 1))\n loss = dloss + iloss\n return loss\n<|end_body_1|>\n\n<|body_start_2|>\n with tf.variable_scope('encoder'):\n encoded = tf.identity(features, 'features')\n seq_length = tf.identity(seq_length, 'input_seq_length')\n for l in range(int(self.conf['numlayers_encoder'])):\n with tf.variable_scope('layer%d' % l):\n num_units = int(self.conf['numunits_encoder'])\n fw = tf.contrib.rnn.GRUCell(num_units)\n bw = tf.contrib.rnn.GRUCell(num_units)\n encoded, _ = tf.nn.bidirectional_dynamic_rnn(fw, bw, encoded, dtype=tf.float32, sequence_length=seq_length)\n encoded = tf.concat(encoded, 2)\n if l != int(self.conf['numlayers_encoder']) - 1:\n with tf.name_scope('sub-sample'):\n encoded = encoded[:, ::int(self.conf['subsample'])]\n seq_length = tf.to_int32(tf.ceil(tf.to_float(seq_length) / float(self.conf['subsample'])))\n encoded = tf.identity(encoded, 'encoded')\n seq_length = tf.identity(seq_length, 'output_seq_length')\n return (encoded, seq_length)\n<|end_body_2|>\n\n<|body_start_3|>\n with tf.variable_scope('decoder'):\n mask = tf.sequence_mask(seq_length, tf.shape(encoded)[1])\n mask = tf.tile(mask[:, :, tf.newaxis], [1, 1, encoded.shape[-1].value])\n encoded = tf.where(mask, encoded, tf.ones_like(encoded) * encoded.dtype.min)\n outputs = tf.reduce_max(encoded, 1)\n outputs = tf.layers.dense(outputs, int(self.conf['numunits_decoder']), tf.nn.relu)\n outputs = tf.layers.dense(outputs, self.coder.numlabels, tf.nn.sigmoid)\n return outputs\n<|end_body_3|>\n", "class_docstring": "an encoder-decoder with dynamic routing acquisition model", "class_name": "EncoderDecoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EncoderDecoder:\n \"\"\"an encoder-decoder with dynamic routing acquisition model\"\"\"\n\n def model(self, inputs, seq_length):\n \"\"\"apply the model\"\"\"\n <|body_0|>\n\n def loss(self, targets, probs):\n \"\"\"compute the loss args: targets: the reference targets probs: the label probabilities returns: the loss\"\"\"\n <|body_1|>\n\n def encoder(self, features, seq_length):\n \"\"\"encode the input features args: features: a [N x T x F] tensor seq_length: an [N] tensor containing the sequence lengths returns: - the encoded features - the encode features sequence lengths\"\"\"\n <|body_2|>\n\n def decoder(self, encoded, seq_length):\n \"\"\"decode the encoded features args: encoded: a [N x T x F] tensor seq_length: encoded sequence length returns: - the label probabilities [B x L]\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n encoded, seq_length = self.encoder(inputs, seq_length)\n probs = self.decoder(encoded, seq_length)\n return probs\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.name_scope('compute_loss'):\n iw = float(self.conf['insertion_weight'])\n up = float(self.conf['upper_prob'])\n lp = float(self.conf['lower_prob'])\n iloss = iw * tf.reduce_mean(tf.reduce_sum((1 - targets) * tf.maximum(probs - lp, 0) ** 2, 1))\n dloss = tf.reduce_mean(tf.reduce_sum(targets * tf.maximum(up - probs, 0) ** 2, 1))\n loss = dloss + iloss\n return loss\n<|end_body_1|>\n\n<|body_start_2|>\n with tf.variable_scope('encoder'):\n encoded = tf.identity(features, 'features')\n seq_length = tf.identity(seq_length, 'input_seq_length')\n for l in range(int(self.conf['numlayers_encoder'])):\n with tf.variable_scope('layer%d' % l):\n num_units = int(self.conf['numunits_encoder'])\n fw = tf.contrib.rnn.GRUCell(num_units)\n bw = tf.contrib.rnn.GRUCell(num_units)\n encoded, _ = tf.nn.bidirectional_dynamic_rnn(fw, bw, encoded, dtype=tf.float32, sequence_length=seq_length)\n encoded = tf.concat(encoded, 2)\n if l != int(self.conf['numlayers_encoder']) - 1:\n with tf.name_scope('sub-sample'):\n encoded = encoded[:, ::int(self.conf['subsample'])]\n seq_length = tf.to_int32(tf.ceil(tf.to_float(seq_length) / float(self.conf['subsample'])))\n encoded = tf.identity(encoded, 'encoded')\n seq_length = tf.identity(seq_length, 'output_seq_length')\n return (encoded, seq_length)\n<|end_body_2|>\n\n<|body_start_3|>\n with tf.variable_scope('decoder'):\n mask = tf.sequence_mask(seq_length, tf.shape(encoded)[1])\n mask = tf.tile(mask[:, :, tf.newaxis], [1, 1, encoded.shape[-1].value])\n encoded = tf.where(mask, encoded, tf.ones_like(encoded) * encoded.dtype.min)\n outputs = tf.reduce_max(encoded, 1)\n outputs = tf.layers.dense(outputs, int(self.conf['numunits_decoder']), tf.nn.relu)\n outputs = tf.layers.dense(outputs, self.coder.numlabels, tf.nn.sigmoid)\n return outputs\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000273", "length_bytes": 3712, "license_type": "no_license", "methods": [{"docstring": "apply the model", "name": "model", "signature": "def model(self, inputs, seq_length)"}, {"docstring": "compute the loss args: targets: the reference targets probs: the label probabilities returns: the loss", "name": "loss", "signature": "def loss(self, targets, probs)"}, {"docstring": "encode the input features args: features: a [N x T x F] tensor seq_length: an [N] tensor containing the sequence lengths returns: - the encoded features - the encode features sequence lengths", "name": "encoder", "signature": "def encoder(self, features, seq_length)"}, {"docstring": "decode the encoded features args: encoded: a [N x T x F] tensor seq_length: encoded sequence length returns: - the label probabilities [B x L]", "name": "decoder", "signature": "def decoder(self, encoded, seq_length)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_024050", "prompt": "Implement the Python class `EncoderDecoder` described below.\n\nClass description:\nan encoder-decoder with dynamic routing acquisition model\n\nMethod signatures and docstrings:\n- def model(self, inputs, seq_length): apply the model\n- def loss(self, targets, probs): compute the loss args: targets: the reference targets probs: the label probabilities returns: the loss\n- def encoder(self, features, seq_length): encode the input features args: features: a [N x T x F] tensor seq_length: an [N] tensor containing the sequence lengths returns: - the encoded features - the encode features sequence lengths\n- def decoder(self, encoded, seq_length): decode the encoded features args: encoded: a [N x T x F] tensor seq_length: encoded sequence length returns: - the label probabilities [B x L]", "prompted_full_text": "Implement the Python class `EncoderDecoder` described below.\n\nClass description:\nan encoder-decoder with dynamic routing acquisition model\n\nMethod signatures and docstrings:\n- def model(self, inputs, seq_length): apply the model\n- def loss(self, targets, probs): compute the loss args: targets: the reference targets probs: the label probabilities returns: the loss\n- def encoder(self, features, seq_length): encode the input features args: features: a [N x T x F] tensor seq_length: an [N] tensor containing the sequence lengths returns: - the encoded features - the encode features sequence lengths\n- def decoder(self, encoded, seq_length): decode the encoded features args: encoded: a [N x T x F] tensor seq_length: encoded sequence length returns: - the label probabilities [B x L]\n\n<|skeleton|>\nclass EncoderDecoder:\n \"\"\"an encoder-decoder with dynamic routing acquisition model\"\"\"\n\n def model(self, inputs, seq_length):\n \"\"\"apply the model\"\"\"\n <|body_0|>\n\n def loss(self, targets, probs):\n \"\"\"compute the loss args: targets: the reference targets probs: the label probabilities returns: the loss\"\"\"\n <|body_1|>\n\n def encoder(self, features, seq_length):\n \"\"\"encode the input features args: features: a [N x T x F] tensor seq_length: an [N] tensor containing the sequence lengths returns: - the encoded features - the encode features sequence lengths\"\"\"\n <|body_2|>\n\n def decoder(self, encoded, seq_length):\n \"\"\"decode the encoded features args: encoded: a [N x T x F] tensor seq_length: encoded sequence length returns: - the label probabilities [B x L]\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n encoded, seq_length = self.encoder(inputs, seq_length)\n probs = self.decoder(encoded, seq_length)\n return probs\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.name_scope('compute_loss'):\n iw = float(self.conf['insertion_weight'])\n up = float(self.conf['upper_prob'])\n lp = float(self.conf['lower_prob'])\n iloss = iw * tf.reduce_mean(tf.reduce_sum((1 - targets) * tf.maximum(probs - lp, 0) ** 2, 1))\n dloss = tf.reduce_mean(tf.reduce_sum(targets * tf.maximum(up - probs, 0) ** 2, 1))\n loss = dloss + iloss\n return loss\n<|end_body_1|>\n\n<|body_start_2|>\n with tf.variable_scope('encoder'):\n encoded = tf.identity(features, 'features')\n seq_length = tf.identity(seq_length, 'input_seq_length')\n for l in range(int(self.conf['numlayers_encoder'])):\n with tf.variable_scope('layer%d' % l):\n num_units = int(self.conf['numunits_encoder'])\n fw = tf.contrib.rnn.GRUCell(num_units)\n bw = tf.contrib.rnn.GRUCell(num_units)\n encoded, _ = tf.nn.bidirectional_dynamic_rnn(fw, bw, encoded, dtype=tf.float32, sequence_length=seq_length)\n encoded = tf.concat(encoded, 2)\n if l != int(self.conf['numlayers_encoder']) - 1:\n with tf.name_scope('sub-sample'):\n encoded = encoded[:, ::int(self.conf['subsample'])]\n seq_length = tf.to_int32(tf.ceil(tf.to_float(seq_length) / float(self.conf['subsample'])))\n encoded = tf.identity(encoded, 'encoded')\n seq_length = tf.identity(seq_length, 'output_seq_length')\n return (encoded, seq_length)\n<|end_body_2|>\n\n<|body_start_3|>\n with tf.variable_scope('decoder'):\n mask = tf.sequence_mask(seq_length, tf.shape(encoded)[1])\n mask = tf.tile(mask[:, :, tf.newaxis], [1, 1, encoded.shape[-1].value])\n encoded = tf.where(mask, encoded, tf.ones_like(encoded) * encoded.dtype.min)\n outputs = tf.reduce_max(encoded, 1)\n outputs = tf.layers.dense(outputs, int(self.conf['numunits_decoder']), tf.nn.relu)\n outputs = tf.layers.dense(outputs, self.coder.numlabels, tf.nn.sigmoid)\n return outputs\n<|end_body_3|>\n", "revision_id": "fcbe609505f86f142cc6e78686e5c25b0e58e178", "skeleton": "<|skeleton|>\nclass EncoderDecoder:\n \"\"\"an encoder-decoder with dynamic routing acquisition model\"\"\"\n\n def model(self, inputs, seq_length):\n \"\"\"apply the model\"\"\"\n <|body_0|>\n\n def loss(self, targets, probs):\n \"\"\"compute the loss args: targets: the reference targets probs: the label probabilities returns: the loss\"\"\"\n <|body_1|>\n\n def encoder(self, features, seq_length):\n \"\"\"encode the input features args: features: a [N x T x F] tensor seq_length: an [N] tensor containing the sequence lengths returns: - the encoded features - the encode features sequence lengths\"\"\"\n <|body_2|>\n\n def decoder(self, encoded, seq_length):\n \"\"\"decode the encoded features args: encoded: a [N x T x F] tensor seq_length: encoded sequence length returns: - the label probabilities [B x L]\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EncoderDecoder:\n \"\"\"an encoder-decoder with dynamic routing acquisition model\"\"\"\n\n def model(self, inputs, seq_length):\n \"\"\"apply the model\"\"\"\n encoded, seq_length = self.encoder(inputs, seq_length)\n probs = self.decoder(encoded, seq_length)\n return probs\n\n def loss(self, targets, probs):\n \"\"\"compute the loss args: targets: the reference targets probs: the label probabilities returns: the loss\"\"\"\n with tf.name_scope('compute_loss'):\n iw = float(self.conf['insertion_weight'])\n up = float(self.conf['upper_prob'])\n lp = float(self.conf['lower_prob'])\n iloss = iw * tf.reduce_mean(tf.reduce_sum((1 - targets) * tf.maximum(probs - lp, 0) ** 2, 1))\n dloss = tf.reduce_mean(tf.reduce_sum(targets * tf.maximum(up - probs, 0) ** 2, 1))\n loss = dloss + iloss\n return loss\n\n def encoder(self, features, seq_length):\n \"\"\"encode the input features args: features: a [N x T x F] tensor seq_length: an [N] tensor containing the sequence lengths returns: - the encoded features - the encode features sequence lengths\"\"\"\n with tf.variable_scope('encoder'):\n encoded = tf.identity(features, 'features')\n seq_length = tf.identity(seq_length, 'input_seq_length')\n for l in range(int(self.conf['numlayers_encoder'])):\n with tf.variable_scope('layer%d' % l):\n num_units = int(self.conf['numunits_encoder'])\n fw = tf.contrib.rnn.GRUCell(num_units)\n bw = tf.contrib.rnn.GRUCell(num_units)\n encoded, _ = tf.nn.bidirectional_dynamic_rnn(fw, bw, encoded, dtype=tf.float32, sequence_length=seq_length)\n encoded = tf.concat(encoded, 2)\n if l != int(self.conf['numlayers_encoder']) - 1:\n with tf.name_scope('sub-sample'):\n encoded = encoded[:, ::int(self.conf['subsample'])]\n seq_length = tf.to_int32(tf.ceil(tf.to_float(seq_length) / float(self.conf['subsample'])))\n encoded = tf.identity(encoded, 'encoded')\n seq_length = tf.identity(seq_length, 'output_seq_length')\n return (encoded, seq_length)\n\n def decoder(self, encoded, seq_length):\n \"\"\"decode the encoded features args: encoded: a [N x T x F] tensor seq_length: encoded sequence length returns: - the label probabilities [B x L]\"\"\"\n with tf.variable_scope('decoder'):\n mask = tf.sequence_mask(seq_length, tf.shape(encoded)[1])\n mask = tf.tile(mask[:, :, tf.newaxis], [1, 1, encoded.shape[-1].value])\n encoded = tf.where(mask, encoded, tf.ones_like(encoded) * encoded.dtype.min)\n outputs = tf.reduce_max(encoded, 1)\n outputs = tf.layers.dense(outputs, int(self.conf['numunits_decoder']), tf.nn.relu)\n outputs = tf.layers.dense(outputs, self.coder.numlabels, tf.nn.sigmoid)\n return outputs\n", "source": "the_stack_v2_python_sparse", "source_path": "assist/acquisition/tfmodel/encoder_decoder.py", "source_repo": "GillesDepypere/assist", "split": "test", "star_events_count": 1} {"blob_id": "a6d618bdd10cf1ef7f19c2134eaacb8f966b9442", "bodies": ["assert len(set(ms)) == 1\nVp = [list(eigen_basis(m)) for m in ms]\nVb = [q.subs(x, s) for q in eigen_basis(n)]\nQ = [mu.subs(x, s) for mu in eigen_basis(r)]\nCoupledProblem.__init__(self, Vp, Vb, Q, beam)\nself.params = params", "if isinstance(self.beam, LineBeam):\n dim = max(self.n, self.r)\n Bb = eigen_poisson.mass_matrix(dim)\n Bb = Bb[:self.n, :self.r]\n Bb *= float(self.beam.Jac)\nelse:\n Bb = CoupledProblem.Bb_matrix(self)\nreturn Bb", "if norm is None:\n return np.eye(self.r)\nif isinstance(self.beam, LineBeam):\n diag = np.array([(pi / 2 + k * pi / 2) ** 2 for k in range(self.r)], dtype='float')\n C = np.diag(diag ** norm)\n J = self.beam.Jac\n J = J ** (1 - 2 * norm)\n C *= float(J)\nelse:\n C = CoupledProblem.C_matrix(self, norm)\nreturn C"], "bodies_text": "<|body_start_0|>\n assert len(set(ms)) == 1\n Vp = [list(eigen_basis(m)) for m in ms]\n Vb = [q.subs(x, s) for q in eigen_basis(n)]\n Q = [mu.subs(x, s) for mu in eigen_basis(r)]\n CoupledProblem.__init__(self, Vp, Vb, Q, beam)\n self.params = params\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(self.beam, LineBeam):\n dim = max(self.n, self.r)\n Bb = eigen_poisson.mass_matrix(dim)\n Bb = Bb[:self.n, :self.r]\n Bb *= float(self.beam.Jac)\n else:\n Bb = CoupledProblem.Bb_matrix(self)\n return Bb\n<|end_body_1|>\n\n<|body_start_2|>\n if norm is None:\n return np.eye(self.r)\n if isinstance(self.beam, LineBeam):\n diag = np.array([(pi / 2 + k * pi / 2) ** 2 for k in range(self.r)], dtype='float')\n C = np.diag(diag ** norm)\n J = self.beam.Jac\n J = J ** (1 - 2 * norm)\n C *= float(J)\n else:\n C = CoupledProblem.C_matrix(self, norm)\n return C\n<|end_body_2|>\n", "class_docstring": "Parent for coupled problems with spaces Vp, Vb, Q are spanned by functions from eigenbasis", "class_name": "CoupledEigen", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CoupledEigen:\n \"\"\"Parent for coupled problems with spaces Vp, Vb, Q are spanned by functions from eigenbasis\"\"\"\n\n def __init__(self, ms, n, r, beam, params):\n \"\"\"Solver with ms[i] functions for i-th comp of Vp, n for Vb and r for Q.\"\"\"\n <|body_0|>\n\n def Bb_matrix(self):\n \"\"\"Matrix of the constraint on the beam\"\"\"\n <|body_1|>\n\n def C_matrix(self, norm):\n \"\"\"H^norm matrices of Q\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert len(set(ms)) == 1\n Vp = [list(eigen_basis(m)) for m in ms]\n Vb = [q.subs(x, s) for q in eigen_basis(n)]\n Q = [mu.subs(x, s) for mu in eigen_basis(r)]\n CoupledProblem.__init__(self, Vp, Vb, Q, beam)\n self.params = params\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(self.beam, LineBeam):\n dim = max(self.n, self.r)\n Bb = eigen_poisson.mass_matrix(dim)\n Bb = Bb[:self.n, :self.r]\n Bb *= float(self.beam.Jac)\n else:\n Bb = CoupledProblem.Bb_matrix(self)\n return Bb\n<|end_body_1|>\n\n<|body_start_2|>\n if norm is None:\n return np.eye(self.r)\n if isinstance(self.beam, LineBeam):\n diag = np.array([(pi / 2 + k * pi / 2) ** 2 for k in range(self.r)], dtype='float')\n C = np.diag(diag ** norm)\n J = self.beam.Jac\n J = J ** (1 - 2 * norm)\n C *= float(J)\n else:\n C = CoupledProblem.C_matrix(self, norm)\n return C\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000274", "length_bytes": 2158, "license_type": "no_license", "methods": [{"docstring": "Solver with ms[i] functions for i-th comp of Vp, n for Vb and r for Q.", "name": "__init__", "signature": "def __init__(self, ms, n, r, beam, params)"}, {"docstring": "Matrix of the constraint on the beam", "name": "Bb_matrix", "signature": "def Bb_matrix(self)"}, {"docstring": "H^norm matrices of Q", "name": "C_matrix", "signature": "def C_matrix(self, norm)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000532", "prompt": "Implement the Python class `CoupledEigen` described below.\n\nClass description:\nParent for coupled problems with spaces Vp, Vb, Q are spanned by functions from eigenbasis\n\nMethod signatures and docstrings:\n- def __init__(self, ms, n, r, beam, params): Solver with ms[i] functions for i-th comp of Vp, n for Vb and r for Q.\n- def Bb_matrix(self): Matrix of the constraint on the beam\n- def C_matrix(self, norm): H^norm matrices of Q", "prompted_full_text": "Implement the Python class `CoupledEigen` described below.\n\nClass description:\nParent for coupled problems with spaces Vp, Vb, Q are spanned by functions from eigenbasis\n\nMethod signatures and docstrings:\n- def __init__(self, ms, n, r, beam, params): Solver with ms[i] functions for i-th comp of Vp, n for Vb and r for Q.\n- def Bb_matrix(self): Matrix of the constraint on the beam\n- def C_matrix(self, norm): H^norm matrices of Q\n\n<|skeleton|>\nclass CoupledEigen:\n \"\"\"Parent for coupled problems with spaces Vp, Vb, Q are spanned by functions from eigenbasis\"\"\"\n\n def __init__(self, ms, n, r, beam, params):\n \"\"\"Solver with ms[i] functions for i-th comp of Vp, n for Vb and r for Q.\"\"\"\n <|body_0|>\n\n def Bb_matrix(self):\n \"\"\"Matrix of the constraint on the beam\"\"\"\n <|body_1|>\n\n def C_matrix(self, norm):\n \"\"\"H^norm matrices of Q\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert len(set(ms)) == 1\n Vp = [list(eigen_basis(m)) for m in ms]\n Vb = [q.subs(x, s) for q in eigen_basis(n)]\n Q = [mu.subs(x, s) for mu in eigen_basis(r)]\n CoupledProblem.__init__(self, Vp, Vb, Q, beam)\n self.params = params\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(self.beam, LineBeam):\n dim = max(self.n, self.r)\n Bb = eigen_poisson.mass_matrix(dim)\n Bb = Bb[:self.n, :self.r]\n Bb *= float(self.beam.Jac)\n else:\n Bb = CoupledProblem.Bb_matrix(self)\n return Bb\n<|end_body_1|>\n\n<|body_start_2|>\n if norm is None:\n return np.eye(self.r)\n if isinstance(self.beam, LineBeam):\n diag = np.array([(pi / 2 + k * pi / 2) ** 2 for k in range(self.r)], dtype='float')\n C = np.diag(diag ** norm)\n J = self.beam.Jac\n J = J ** (1 - 2 * norm)\n C *= float(J)\n else:\n C = CoupledProblem.C_matrix(self, norm)\n return C\n<|end_body_2|>\n", "revision_id": "2fb3686804e836d4031fbf231a36a0f9ac8a3012", "skeleton": "<|skeleton|>\nclass CoupledEigen:\n \"\"\"Parent for coupled problems with spaces Vp, Vb, Q are spanned by functions from eigenbasis\"\"\"\n\n def __init__(self, ms, n, r, beam, params):\n \"\"\"Solver with ms[i] functions for i-th comp of Vp, n for Vb and r for Q.\"\"\"\n <|body_0|>\n\n def Bb_matrix(self):\n \"\"\"Matrix of the constraint on the beam\"\"\"\n <|body_1|>\n\n def C_matrix(self, norm):\n \"\"\"H^norm matrices of Q\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CoupledEigen:\n \"\"\"Parent for coupled problems with spaces Vp, Vb, Q are spanned by functions from eigenbasis\"\"\"\n\n def __init__(self, ms, n, r, beam, params):\n \"\"\"Solver with ms[i] functions for i-th comp of Vp, n for Vb and r for Q.\"\"\"\n assert len(set(ms)) == 1\n Vp = [list(eigen_basis(m)) for m in ms]\n Vb = [q.subs(x, s) for q in eigen_basis(n)]\n Q = [mu.subs(x, s) for mu in eigen_basis(r)]\n CoupledProblem.__init__(self, Vp, Vb, Q, beam)\n self.params = params\n\n def Bb_matrix(self):\n \"\"\"Matrix of the constraint on the beam\"\"\"\n if isinstance(self.beam, LineBeam):\n dim = max(self.n, self.r)\n Bb = eigen_poisson.mass_matrix(dim)\n Bb = Bb[:self.n, :self.r]\n Bb *= float(self.beam.Jac)\n else:\n Bb = CoupledProblem.Bb_matrix(self)\n return Bb\n\n def C_matrix(self, norm):\n \"\"\"H^norm matrices of Q\"\"\"\n if norm is None:\n return np.eye(self.r)\n if isinstance(self.beam, LineBeam):\n diag = np.array([(pi / 2 + k * pi / 2) ** 2 for k in range(self.r)], dtype='float')\n C = np.diag(diag ** norm)\n J = self.beam.Jac\n J = J ** (1 - 2 * norm)\n C *= float(J)\n else:\n C = CoupledProblem.C_matrix(self, norm)\n return C\n", "source": "the_stack_v2_python_sparse", "source_path": "kent-report/py/coupled_eigen.py", "source_repo": "MiroK/cutFEM-beam", "split": "test", "star_events_count": 0} {"blob_id": "74c3421712d658d3d0f656ead4382a1a05bacf19", "bodies": ["if isinstance(info, dict):\n info = VirtualMachine.VirtualMachineError.FormatDebugInfo(info, error_message)\n return cls(info)\nraise TypeError('The argument of FromDebugInfo should be an instance of dictionary.')", "sep = '\\n%s\\n' % ('-' * 65)\n\ndef AddHeader(error, header, message):\n error += '{sep}{header}\\n{message}\\n'.format(sep=sep, header=header, message=message)\n return error\n\ndef AddKeyIfExists(result, header, key):\n if key in info:\n result = AddHeader(result, header, info[key])\n del info[key]\n return result\nresult = AddHeader('', 'error_message:', error_message) if error_message else ''\nresult = AddKeyIfExists(result, 'traceroute:', 'traceroute')\nreturn AddHeader(result, 'Debug Info:', pprint.pformat(info))"], "bodies_text": "<|body_start_0|>\n if isinstance(info, dict):\n info = VirtualMachine.VirtualMachineError.FormatDebugInfo(info, error_message)\n return cls(info)\n raise TypeError('The argument of FromDebugInfo should be an instance of dictionary.')\n<|end_body_0|>\n\n<|body_start_1|>\n sep = '\\n%s\\n' % ('-' * 65)\n\n def AddHeader(error, header, message):\n error += '{sep}{header}\\n{message}\\n'.format(sep=sep, header=header, message=message)\n return error\n\n def AddKeyIfExists(result, header, key):\n if key in info:\n result = AddHeader(result, header, info[key])\n del info[key]\n return result\n result = AddHeader('', 'error_message:', error_message) if error_message else ''\n result = AddKeyIfExists(result, 'traceroute:', 'traceroute')\n return AddHeader(result, 'Debug Info:', pprint.pformat(info))\n<|end_body_1|>\n", "class_docstring": "An error raised when VM is having an issue.", "class_name": "VirtualMachineError", "detected_licenses": ["Classpath-exception-2.0", "BSD-3-Clause", "AGPL-3.0-only", "MIT", "GPL-2.0-only", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VirtualMachineError:\n \"\"\"An error raised when VM is having an issue.\"\"\"\n\n def FromDebugInfo(cls, info, error_message):\n \"\"\"Create VirtualMachineError class from debug information. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: a cls exception class Raises: TypeError: if info is not an instance of dictionary.\"\"\"\n <|body_0|>\n\n def FormatDebugInfo(info, error_message):\n \"\"\"A function to return a string in human readable format. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: A human readable string of debug information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(info, dict):\n info = VirtualMachine.VirtualMachineError.FormatDebugInfo(info, error_message)\n return cls(info)\n raise TypeError('The argument of FromDebugInfo should be an instance of dictionary.')\n<|end_body_0|>\n\n<|body_start_1|>\n sep = '\\n%s\\n' % ('-' * 65)\n\n def AddHeader(error, header, message):\n error += '{sep}{header}\\n{message}\\n'.format(sep=sep, header=header, message=message)\n return error\n\n def AddKeyIfExists(result, header, key):\n if key in info:\n result = AddHeader(result, header, info[key])\n del info[key]\n return result\n result = AddHeader('', 'error_message:', error_message) if error_message else ''\n result = AddKeyIfExists(result, 'traceroute:', 'traceroute')\n return AddHeader(result, 'Debug Info:', pprint.pformat(info))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000275", "length_bytes": 6889, "license_type": "permissive", "methods": [{"docstring": "Create VirtualMachineError class from debug information. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: a cls exception class Raises: TypeError: if info is not an instance of dictionary.", "name": "FromDebugInfo", "signature": "def FromDebugInfo(cls, info, error_message)"}, {"docstring": "A function to return a string in human readable format. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: A human readable string of debug information.", "name": "FormatDebugInfo", "signature": "def FormatDebugInfo(info, error_message)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_018373", "prompt": "Implement the Python class `VirtualMachineError` described below.\n\nClass description:\nAn error raised when VM is having an issue.\n\nMethod signatures and docstrings:\n- def FromDebugInfo(cls, info, error_message): Create VirtualMachineError class from debug information. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: a cls exception class Raises: TypeError: if info is not an instance of dictionary.\n- def FormatDebugInfo(info, error_message): A function to return a string in human readable format. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: A human readable string of debug information.", "prompted_full_text": "Implement the Python class `VirtualMachineError` described below.\n\nClass description:\nAn error raised when VM is having an issue.\n\nMethod signatures and docstrings:\n- def FromDebugInfo(cls, info, error_message): Create VirtualMachineError class from debug information. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: a cls exception class Raises: TypeError: if info is not an instance of dictionary.\n- def FormatDebugInfo(info, error_message): A function to return a string in human readable format. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: A human readable string of debug information.\n\n<|skeleton|>\nclass VirtualMachineError:\n \"\"\"An error raised when VM is having an issue.\"\"\"\n\n def FromDebugInfo(cls, info, error_message):\n \"\"\"Create VirtualMachineError class from debug information. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: a cls exception class Raises: TypeError: if info is not an instance of dictionary.\"\"\"\n <|body_0|>\n\n def FormatDebugInfo(info, error_message):\n \"\"\"A function to return a string in human readable format. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: A human readable string of debug information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if isinstance(info, dict):\n info = VirtualMachine.VirtualMachineError.FormatDebugInfo(info, error_message)\n return cls(info)\n raise TypeError('The argument of FromDebugInfo should be an instance of dictionary.')\n<|end_body_0|>\n\n<|body_start_1|>\n sep = '\\n%s\\n' % ('-' * 65)\n\n def AddHeader(error, header, message):\n error += '{sep}{header}\\n{message}\\n'.format(sep=sep, header=header, message=message)\n return error\n\n def AddKeyIfExists(result, header, key):\n if key in info:\n result = AddHeader(result, header, info[key])\n del info[key]\n return result\n result = AddHeader('', 'error_message:', error_message) if error_message else ''\n result = AddKeyIfExists(result, 'traceroute:', 'traceroute')\n return AddHeader(result, 'Debug Info:', pprint.pformat(info))\n<|end_body_1|>\n", "revision_id": "d0699f32998898757b036704fba39e5471641f01", "skeleton": "<|skeleton|>\nclass VirtualMachineError:\n \"\"\"An error raised when VM is having an issue.\"\"\"\n\n def FromDebugInfo(cls, info, error_message):\n \"\"\"Create VirtualMachineError class from debug information. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: a cls exception class Raises: TypeError: if info is not an instance of dictionary.\"\"\"\n <|body_0|>\n\n def FormatDebugInfo(info, error_message):\n \"\"\"A function to return a string in human readable format. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: A human readable string of debug information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VirtualMachineError:\n \"\"\"An error raised when VM is having an issue.\"\"\"\n\n def FromDebugInfo(cls, info, error_message):\n \"\"\"Create VirtualMachineError class from debug information. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: a cls exception class Raises: TypeError: if info is not an instance of dictionary.\"\"\"\n if isinstance(info, dict):\n info = VirtualMachine.VirtualMachineError.FormatDebugInfo(info, error_message)\n return cls(info)\n raise TypeError('The argument of FromDebugInfo should be an instance of dictionary.')\n\n def FormatDebugInfo(info, error_message):\n \"\"\"A function to return a string in human readable format. Args: info: A dictionary containing debug information (such as traceroute info). error_message: the error message from the originating code. Returns: A human readable string of debug information.\"\"\"\n sep = '\\n%s\\n' % ('-' * 65)\n\n def AddHeader(error, header, message):\n error += '{sep}{header}\\n{message}\\n'.format(sep=sep, header=header, message=message)\n return error\n\n def AddKeyIfExists(result, header, key):\n if key in info:\n result = AddHeader(result, header, info[key])\n del info[key]\n return result\n result = AddHeader('', 'error_message:', error_message) if error_message else ''\n result = AddKeyIfExists(result, 'traceroute:', 'traceroute')\n return AddHeader(result, 'Debug Info:', pprint.pformat(info))\n", "source": "the_stack_v2_python_sparse", "source_path": "perfkitbenchmarker/errors.py", "source_repo": "GoogleCloudPlatform/PerfKitBenchmarker", "split": "test", "star_events_count": 1923} {"blob_id": "cf0c5e3ddaecb2f9fd25dcafc1c660631e65a42d", "bodies": ["if self.has_permission('RightTPI') is False:\n self.no_access()\nwith Database() as db:\n if id_survey is None:\n data = db.query(Table).all()\n else:\n data = db.query(Table).get(id_survey)\nreturn {'data': data}", "if self.has_permission('RightTPI') is False:\n self.no_access()\nid_survey = uuid.uuid4()\nid_language_content = MultiLang.set(body['name'], True)\nwith Database() as db:\n db.insert(Table(id_survey, id_language_content, body['survey_type']))\n db.commit()\nreturn {'id_survey': id_survey, 'message': 'survey successfully created'}", "if self.has_permission('RightTPI') is False:\n self.no_access()\nif 'id_survey' not in body:\n raise Exception('You need to pass a id_survey')\nwith Database() as db:\n inspection = db.query(Inspection).filter(Inspection.id_survey == body['id_survey'], Inspection.is_completed == True).all()\n if len(inspection) > 0:\n self.remove(body['id_survey'])\n self.create(body)\n else:\n data = db.query(Table).get(body['id_survey'])\n if 'name' in body:\n data.id_language_content_name = MultiLang.set(body['name'])\n if 'survey_type' in body:\n data.survey_type = body['survey_type']\n if 'is_active' in body:\n data.is_active = body['is_active']\n db.commit()\nreturn {'message': 'survey successfully modified'}", "if self.has_permission('RightTPI') is False:\n self.no_access()\nwith Database() as db:\n data = db.query(Table).get(id_survey)\n data.is_active = False\n db.commit()\nreturn {'message': 'survey successfully removed'}"], "bodies_text": "<|body_start_0|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n if id_survey is None:\n data = db.query(Table).all()\n else:\n data = db.query(Table).get(id_survey)\n return {'data': data}\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n id_survey = uuid.uuid4()\n id_language_content = MultiLang.set(body['name'], True)\n with Database() as db:\n db.insert(Table(id_survey, id_language_content, body['survey_type']))\n db.commit()\n return {'id_survey': id_survey, 'message': 'survey successfully created'}\n<|end_body_1|>\n\n<|body_start_2|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n if 'id_survey' not in body:\n raise Exception('You need to pass a id_survey')\n with Database() as db:\n inspection = db.query(Inspection).filter(Inspection.id_survey == body['id_survey'], Inspection.is_completed == True).all()\n if len(inspection) > 0:\n self.remove(body['id_survey'])\n self.create(body)\n else:\n data = db.query(Table).get(body['id_survey'])\n if 'name' in body:\n data.id_language_content_name = MultiLang.set(body['name'])\n if 'survey_type' in body:\n data.survey_type = body['survey_type']\n if 'is_active' in body:\n data.is_active = body['is_active']\n db.commit()\n return {'message': 'survey successfully modified'}\n<|end_body_2|>\n\n<|body_start_3|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n data = db.query(Table).get(id_survey)\n data.is_active = False\n db.commit()\n return {'message': 'survey successfully removed'}\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Survey", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Survey:\n\n def get(self, id_survey=None):\n \"\"\"Return the survey information :param id_survey: UUID\"\"\"\n <|body_0|>\n\n def create(self, body):\n \"\"\"Create a new survey :param body: { name: JSON, survey_type: ENUM('test'), questions: JSON }\"\"\"\n <|body_1|>\n\n def modify(self, body):\n \"\"\"Modify a survey :param body: { id_survey: UUID, name: JSON, survey_type: ENUM('test'), is_active: BOOLEAN, questions: JSON }\"\"\"\n <|body_2|>\n\n def remove(self, id_survey):\n \"\"\"Remove a survey :param id_survey: UUID\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n if id_survey is None:\n data = db.query(Table).all()\n else:\n data = db.query(Table).get(id_survey)\n return {'data': data}\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n id_survey = uuid.uuid4()\n id_language_content = MultiLang.set(body['name'], True)\n with Database() as db:\n db.insert(Table(id_survey, id_language_content, body['survey_type']))\n db.commit()\n return {'id_survey': id_survey, 'message': 'survey successfully created'}\n<|end_body_1|>\n\n<|body_start_2|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n if 'id_survey' not in body:\n raise Exception('You need to pass a id_survey')\n with Database() as db:\n inspection = db.query(Inspection).filter(Inspection.id_survey == body['id_survey'], Inspection.is_completed == True).all()\n if len(inspection) > 0:\n self.remove(body['id_survey'])\n self.create(body)\n else:\n data = db.query(Table).get(body['id_survey'])\n if 'name' in body:\n data.id_language_content_name = MultiLang.set(body['name'])\n if 'survey_type' in body:\n data.survey_type = body['survey_type']\n if 'is_active' in body:\n data.is_active = body['is_active']\n db.commit()\n return {'message': 'survey successfully modified'}\n<|end_body_2|>\n\n<|body_start_3|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n data = db.query(Table).get(id_survey)\n data.is_active = False\n db.commit()\n return {'message': 'survey successfully removed'}\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000276", "length_bytes": 2534, "license_type": "no_license", "methods": [{"docstring": "Return the survey information :param id_survey: UUID", "name": "get", "signature": "def get(self, id_survey=None)"}, {"docstring": "Create a new survey :param body: { name: JSON, survey_type: ENUM('test'), questions: JSON }", "name": "create", "signature": "def create(self, body)"}, {"docstring": "Modify a survey :param body: { id_survey: UUID, name: JSON, survey_type: ENUM('test'), is_active: BOOLEAN, questions: JSON }", "name": "modify", "signature": "def modify(self, body)"}, {"docstring": "Remove a survey :param id_survey: UUID", "name": "remove", "signature": "def remove(self, id_survey)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_000994", "prompt": "Implement the Python class `Survey` described below.\n\nClass description:\nImplement the Survey class.\n\nMethod signatures and docstrings:\n- def get(self, id_survey=None): Return the survey information :param id_survey: UUID\n- def create(self, body): Create a new survey :param body: { name: JSON, survey_type: ENUM('test'), questions: JSON }\n- def modify(self, body): Modify a survey :param body: { id_survey: UUID, name: JSON, survey_type: ENUM('test'), is_active: BOOLEAN, questions: JSON }\n- def remove(self, id_survey): Remove a survey :param id_survey: UUID", "prompted_full_text": "Implement the Python class `Survey` described below.\n\nClass description:\nImplement the Survey class.\n\nMethod signatures and docstrings:\n- def get(self, id_survey=None): Return the survey information :param id_survey: UUID\n- def create(self, body): Create a new survey :param body: { name: JSON, survey_type: ENUM('test'), questions: JSON }\n- def modify(self, body): Modify a survey :param body: { id_survey: UUID, name: JSON, survey_type: ENUM('test'), is_active: BOOLEAN, questions: JSON }\n- def remove(self, id_survey): Remove a survey :param id_survey: UUID\n\n<|skeleton|>\nclass Survey:\n\n def get(self, id_survey=None):\n \"\"\"Return the survey information :param id_survey: UUID\"\"\"\n <|body_0|>\n\n def create(self, body):\n \"\"\"Create a new survey :param body: { name: JSON, survey_type: ENUM('test'), questions: JSON }\"\"\"\n <|body_1|>\n\n def modify(self, body):\n \"\"\"Modify a survey :param body: { id_survey: UUID, name: JSON, survey_type: ENUM('test'), is_active: BOOLEAN, questions: JSON }\"\"\"\n <|body_2|>\n\n def remove(self, id_survey):\n \"\"\"Remove a survey :param id_survey: UUID\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n if id_survey is None:\n data = db.query(Table).all()\n else:\n data = db.query(Table).get(id_survey)\n return {'data': data}\n<|end_body_0|>\n\n<|body_start_1|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n id_survey = uuid.uuid4()\n id_language_content = MultiLang.set(body['name'], True)\n with Database() as db:\n db.insert(Table(id_survey, id_language_content, body['survey_type']))\n db.commit()\n return {'id_survey': id_survey, 'message': 'survey successfully created'}\n<|end_body_1|>\n\n<|body_start_2|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n if 'id_survey' not in body:\n raise Exception('You need to pass a id_survey')\n with Database() as db:\n inspection = db.query(Inspection).filter(Inspection.id_survey == body['id_survey'], Inspection.is_completed == True).all()\n if len(inspection) > 0:\n self.remove(body['id_survey'])\n self.create(body)\n else:\n data = db.query(Table).get(body['id_survey'])\n if 'name' in body:\n data.id_language_content_name = MultiLang.set(body['name'])\n if 'survey_type' in body:\n data.survey_type = body['survey_type']\n if 'is_active' in body:\n data.is_active = body['is_active']\n db.commit()\n return {'message': 'survey successfully modified'}\n<|end_body_2|>\n\n<|body_start_3|>\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n data = db.query(Table).get(id_survey)\n data.is_active = False\n db.commit()\n return {'message': 'survey successfully removed'}\n<|end_body_3|>\n", "revision_id": "43bd57c466a5cd3b133ddc437cb4a6b9f007d267", "skeleton": "<|skeleton|>\nclass Survey:\n\n def get(self, id_survey=None):\n \"\"\"Return the survey information :param id_survey: UUID\"\"\"\n <|body_0|>\n\n def create(self, body):\n \"\"\"Create a new survey :param body: { name: JSON, survey_type: ENUM('test'), questions: JSON }\"\"\"\n <|body_1|>\n\n def modify(self, body):\n \"\"\"Modify a survey :param body: { id_survey: UUID, name: JSON, survey_type: ENUM('test'), is_active: BOOLEAN, questions: JSON }\"\"\"\n <|body_2|>\n\n def remove(self, id_survey):\n \"\"\"Remove a survey :param id_survey: UUID\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Survey:\n def get(self, id_survey=None):\n \"\"\"Return the survey information :param id_survey: UUID\"\"\"\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n if id_survey is None:\n data = db.query(Table).all()\n else:\n data = db.query(Table).get(id_survey)\n return {'data': data}\n\n def create(self, body):\n \"\"\"Create a new survey :param body: { name: JSON, survey_type: ENUM('test'), questions: JSON }\"\"\"\n if self.has_permission('RightTPI') is False:\n self.no_access()\n id_survey = uuid.uuid4()\n id_language_content = MultiLang.set(body['name'], True)\n with Database() as db:\n db.insert(Table(id_survey, id_language_content, body['survey_type']))\n db.commit()\n return {'id_survey': id_survey, 'message': 'survey successfully created'}\n\n def modify(self, body):\n \"\"\"Modify a survey :param body: { id_survey: UUID, name: JSON, survey_type: ENUM('test'), is_active: BOOLEAN, questions: JSON }\"\"\"\n if self.has_permission('RightTPI') is False:\n self.no_access()\n if 'id_survey' not in body:\n raise Exception('You need to pass a id_survey')\n with Database() as db:\n inspection = db.query(Inspection).filter(Inspection.id_survey == body['id_survey'], Inspection.is_completed == True).all()\n if len(inspection) > 0:\n self.remove(body['id_survey'])\n self.create(body)\n else:\n data = db.query(Table).get(body['id_survey'])\n if 'name' in body:\n data.id_language_content_name = MultiLang.set(body['name'])\n if 'survey_type' in body:\n data.survey_type = body['survey_type']\n if 'is_active' in body:\n data.is_active = body['is_active']\n db.commit()\n return {'message': 'survey successfully modified'}\n\n def remove(self, id_survey):\n \"\"\"Remove a survey :param id_survey: UUID\"\"\"\n if self.has_permission('RightTPI') is False:\n self.no_access()\n with Database() as db:\n data = db.query(Table).get(id_survey)\n data.is_active = False\n db.commit()\n return {'message': 'survey successfully removed'}\n", "source": "the_stack_v2_python_sparse", "source_path": "resturls/survey.py", "source_repo": "CAUCA-9-1-1/survip-api", "split": "test", "star_events_count": 1} {"blob_id": "f53ff31d5bd9ea071c1707e185924075618814e9", "bodies": ["self._fric_dset = fric_dset\nL = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\nsuper().__init__(fric_h5, layers=L, min_area=None, hsds=hsds, check_layers=check_layers)", "mask = None\nif len(ds_slice) == 1 & isinstance(ds_slice[0], tuple):\n ds_slice = ds_slice[0]\nlayer_slice = (self._layers[self._fric_dset].name,) + ds_slice\nmask = self._layers[self._fric_dset][self.excl_h5[layer_slice]]\nmask[mask == self._layers[self._fric_dset].nodata_value] = 1\nreturn mask", "L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\nwith cls(excl_h5, *L, min_area=None, hsds=hsds) as f:\n mask = f.mask\nreturn mask"], "bodies_text": "<|body_start_0|>\n self._fric_dset = fric_dset\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n super().__init__(fric_h5, layers=L, min_area=None, hsds=hsds, check_layers=check_layers)\n<|end_body_0|>\n\n<|body_start_1|>\n mask = None\n if len(ds_slice) == 1 & isinstance(ds_slice[0], tuple):\n ds_slice = ds_slice[0]\n layer_slice = (self._layers[self._fric_dset].name,) + ds_slice\n mask = self._layers[self._fric_dset][self.excl_h5[layer_slice]]\n mask[mask == self._layers[self._fric_dset].nodata_value] = 1\n return mask\n<|end_body_1|>\n\n<|body_start_2|>\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n with cls(excl_h5, *L, min_area=None, hsds=hsds) as f:\n mask = f.mask\n return mask\n<|end_body_2|>\n", "class_docstring": "Class to handle exclusion-style friction layer.", "class_name": "FrictionMask", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FrictionMask:\n \"\"\"Class to handle exclusion-style friction layer.\"\"\"\n\n def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False):\n \"\"\"Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS check_layers : bool Run a pre-flight check on each layer to ensure they contain un-excluded values\"\"\"\n <|body_0|>\n\n def _generate_mask(self, *ds_slice):\n \"\"\"Generate multiplicative friction layer mask. Parameters ---------- ds_slice : int | slice | list | ndarray What to extract from ds, each arg is for a sequential axis. For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 exclusions mask. Returns ------- mask : ndarray Multiplicative friction layer mask with nodata values set to 1.\"\"\"\n <|body_1|>\n\n def run(cls, excl_h5, fric_dset, hsds=False):\n \"\"\"Create inclusion mask from given layers dictionary Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS Returns ------- mask : ndarray Full inclusion mask\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._fric_dset = fric_dset\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n super().__init__(fric_h5, layers=L, min_area=None, hsds=hsds, check_layers=check_layers)\n<|end_body_0|>\n\n<|body_start_1|>\n mask = None\n if len(ds_slice) == 1 & isinstance(ds_slice[0], tuple):\n ds_slice = ds_slice[0]\n layer_slice = (self._layers[self._fric_dset].name,) + ds_slice\n mask = self._layers[self._fric_dset][self.excl_h5[layer_slice]]\n mask[mask == self._layers[self._fric_dset].nodata_value] = 1\n return mask\n<|end_body_1|>\n\n<|body_start_2|>\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n with cls(excl_h5, *L, min_area=None, hsds=hsds) as f:\n mask = f.mask\n return mask\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000277", "length_bytes": 37820, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS check_layers : bool Run a pre-flight check on each layer to ensure they contain un-excluded values", "name": "__init__", "signature": "def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False)"}, {"docstring": "Generate multiplicative friction layer mask. Parameters ---------- ds_slice : int | slice | list | ndarray What to extract from ds, each arg is for a sequential axis. For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 exclusions mask. Returns ------- mask : ndarray Multiplicative friction layer mask with nodata values set to 1.", "name": "_generate_mask", "signature": "def _generate_mask(self, *ds_slice)"}, {"docstring": "Create inclusion mask from given layers dictionary Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS Returns ------- mask : ndarray Full inclusion mask", "name": "run", "signature": "def run(cls, excl_h5, fric_dset, hsds=False)"}], "n_methods": 3, "prompt": "Implement the Python class `FrictionMask` described below.\n\nClass description:\nClass to handle exclusion-style friction layer.\n\nMethod signatures and docstrings:\n- def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False): Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS check_layers : bool Run a pre-flight check on each layer to ensure they contain un-excluded values\n- def _generate_mask(self, *ds_slice): Generate multiplicative friction layer mask. Parameters ---------- ds_slice : int | slice | list | ndarray What to extract from ds, each arg is for a sequential axis. For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 exclusions mask. Returns ------- mask : ndarray Multiplicative friction layer mask with nodata values set to 1.\n- def run(cls, excl_h5, fric_dset, hsds=False): Create inclusion mask from given layers dictionary Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS Returns ------- mask : ndarray Full inclusion mask", "prompted_full_text": "Implement the Python class `FrictionMask` described below.\n\nClass description:\nClass to handle exclusion-style friction layer.\n\nMethod signatures and docstrings:\n- def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False): Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS check_layers : bool Run a pre-flight check on each layer to ensure they contain un-excluded values\n- def _generate_mask(self, *ds_slice): Generate multiplicative friction layer mask. Parameters ---------- ds_slice : int | slice | list | ndarray What to extract from ds, each arg is for a sequential axis. For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 exclusions mask. Returns ------- mask : ndarray Multiplicative friction layer mask with nodata values set to 1.\n- def run(cls, excl_h5, fric_dset, hsds=False): Create inclusion mask from given layers dictionary Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS Returns ------- mask : ndarray Full inclusion mask\n\n<|skeleton|>\nclass FrictionMask:\n \"\"\"Class to handle exclusion-style friction layer.\"\"\"\n\n def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False):\n \"\"\"Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS check_layers : bool Run a pre-flight check on each layer to ensure they contain un-excluded values\"\"\"\n <|body_0|>\n\n def _generate_mask(self, *ds_slice):\n \"\"\"Generate multiplicative friction layer mask. Parameters ---------- ds_slice : int | slice | list | ndarray What to extract from ds, each arg is for a sequential axis. For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 exclusions mask. Returns ------- mask : ndarray Multiplicative friction layer mask with nodata values set to 1.\"\"\"\n <|body_1|>\n\n def run(cls, excl_h5, fric_dset, hsds=False):\n \"\"\"Create inclusion mask from given layers dictionary Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS Returns ------- mask : ndarray Full inclusion mask\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._fric_dset = fric_dset\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n super().__init__(fric_h5, layers=L, min_area=None, hsds=hsds, check_layers=check_layers)\n<|end_body_0|>\n\n<|body_start_1|>\n mask = None\n if len(ds_slice) == 1 & isinstance(ds_slice[0], tuple):\n ds_slice = ds_slice[0]\n layer_slice = (self._layers[self._fric_dset].name,) + ds_slice\n mask = self._layers[self._fric_dset][self.excl_h5[layer_slice]]\n mask[mask == self._layers[self._fric_dset].nodata_value] = 1\n return mask\n<|end_body_1|>\n\n<|body_start_2|>\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n with cls(excl_h5, *L, min_area=None, hsds=hsds) as f:\n mask = f.mask\n return mask\n<|end_body_2|>\n", "revision_id": "497bb7d172197e09a9e14b1b1ca891b8c828b80a", "skeleton": "<|skeleton|>\nclass FrictionMask:\n \"\"\"Class to handle exclusion-style friction layer.\"\"\"\n\n def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False):\n \"\"\"Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS check_layers : bool Run a pre-flight check on each layer to ensure they contain un-excluded values\"\"\"\n <|body_0|>\n\n def _generate_mask(self, *ds_slice):\n \"\"\"Generate multiplicative friction layer mask. Parameters ---------- ds_slice : int | slice | list | ndarray What to extract from ds, each arg is for a sequential axis. For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 exclusions mask. Returns ------- mask : ndarray Multiplicative friction layer mask with nodata values set to 1.\"\"\"\n <|body_1|>\n\n def run(cls, excl_h5, fric_dset, hsds=False):\n \"\"\"Create inclusion mask from given layers dictionary Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS Returns ------- mask : ndarray Full inclusion mask\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FrictionMask:\n \"\"\"Class to handle exclusion-style friction layer.\"\"\"\n\n def __init__(self, fric_h5, fric_dset, hsds=False, check_layers=False):\n \"\"\"Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS check_layers : bool Run a pre-flight check on each layer to ensure they contain un-excluded values\"\"\"\n self._fric_dset = fric_dset\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n super().__init__(fric_h5, layers=L, min_area=None, hsds=hsds, check_layers=check_layers)\n\n def _generate_mask(self, *ds_slice):\n \"\"\"Generate multiplicative friction layer mask. Parameters ---------- ds_slice : int | slice | list | ndarray What to extract from ds, each arg is for a sequential axis. For example, (slice(0, 64), slice(0, 64)) will extract a 64x64 exclusions mask. Returns ------- mask : ndarray Multiplicative friction layer mask with nodata values set to 1.\"\"\"\n mask = None\n if len(ds_slice) == 1 & isinstance(ds_slice[0], tuple):\n ds_slice = ds_slice[0]\n layer_slice = (self._layers[self._fric_dset].name,) + ds_slice\n mask = self._layers[self._fric_dset][self.excl_h5[layer_slice]]\n mask[mask == self._layers[self._fric_dset].nodata_value] = 1\n return mask\n\n def run(cls, excl_h5, fric_dset, hsds=False):\n \"\"\"Create inclusion mask from given layers dictionary Parameters ---------- fric_h5 : str Path to friction layer .h5 file (same format as exclusions file) fric_dset : str Friction layer dataset in fric_h5 hsds : bool Boolean flag to use h5pyd to handle .h5 'files' hosted on AWS behind HSDS Returns ------- mask : ndarray Full inclusion mask\"\"\"\n L = [LayerMask(fric_dset, use_as_weights=True, exclude_nodata=False)]\n with cls(excl_h5, *L, min_area=None, hsds=hsds) as f:\n mask = f.mask\n return mask\n", "source": "the_stack_v2_python_sparse", "source_path": "reV/supply_curve/exclusions.py", "source_repo": "NREL/reV", "split": "test", "star_events_count": 53} {"blob_id": "21303e0f6b18ddbc9265e8eccc492f26039f6e85", "bodies": ["if self.value_type in self._values:\n return self._values[self.value_type] == STATE_ON\nreturn False", "pres = self.gateway.const.Presentation\nclass_map = {pres.S_DOOR: 'opening', pres.S_MOTION: 'motion', pres.S_SMOKE: 'smoke'}\nif float(self.gateway.protocol_version) >= 1.5:\n class_map.update({pres.S_SPRINKLER: 'sprinkler', pres.S_WATER_LEAK: 'leak', pres.S_SOUND: 'sound', pres.S_VIBRATION: 'vibration', pres.S_MOISTURE: 'moisture'})\nif class_map.get(self.child_type) in SENSOR_CLASSES:\n return class_map.get(self.child_type)"], "bodies_text": "<|body_start_0|>\n if self.value_type in self._values:\n return self._values[self.value_type] == STATE_ON\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n pres = self.gateway.const.Presentation\n class_map = {pres.S_DOOR: 'opening', pres.S_MOTION: 'motion', pres.S_SMOKE: 'smoke'}\n if float(self.gateway.protocol_version) >= 1.5:\n class_map.update({pres.S_SPRINKLER: 'sprinkler', pres.S_WATER_LEAK: 'leak', pres.S_SOUND: 'sound', pres.S_VIBRATION: 'vibration', pres.S_MOISTURE: 'moisture'})\n if class_map.get(self.child_type) in SENSOR_CLASSES:\n return class_map.get(self.child_type)\n<|end_body_1|>\n", "class_docstring": "Represent the value of a MySensors Binary Sensor child node.", "class_name": "MySensorsBinarySensor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MySensorsBinarySensor:\n \"\"\"Represent the value of a MySensors Binary Sensor child node.\"\"\"\n\n def is_on(self):\n \"\"\"Return True if the binary sensor is on.\"\"\"\n <|body_0|>\n\n def sensor_class(self):\n \"\"\"Return the class of this sensor, from SENSOR_CLASSES.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.value_type in self._values:\n return self._values[self.value_type] == STATE_ON\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n pres = self.gateway.const.Presentation\n class_map = {pres.S_DOOR: 'opening', pres.S_MOTION: 'motion', pres.S_SMOKE: 'smoke'}\n if float(self.gateway.protocol_version) >= 1.5:\n class_map.update({pres.S_SPRINKLER: 'sprinkler', pres.S_WATER_LEAK: 'leak', pres.S_SOUND: 'sound', pres.S_VIBRATION: 'vibration', pres.S_MOISTURE: 'moisture'})\n if class_map.get(self.child_type) in SENSOR_CLASSES:\n return class_map.get(self.child_type)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000278", "length_bytes": 2829, "license_type": "permissive", "methods": [{"docstring": "Return True if the binary sensor is on.", "name": "is_on", "signature": "def is_on(self)"}, {"docstring": "Return the class of this sensor, from SENSOR_CLASSES.", "name": "sensor_class", "signature": "def sensor_class(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024931", "prompt": "Implement the Python class `MySensorsBinarySensor` described below.\n\nClass description:\nRepresent the value of a MySensors Binary Sensor child node.\n\nMethod signatures and docstrings:\n- def is_on(self): Return True if the binary sensor is on.\n- def sensor_class(self): Return the class of this sensor, from SENSOR_CLASSES.", "prompted_full_text": "Implement the Python class `MySensorsBinarySensor` described below.\n\nClass description:\nRepresent the value of a MySensors Binary Sensor child node.\n\nMethod signatures and docstrings:\n- def is_on(self): Return True if the binary sensor is on.\n- def sensor_class(self): Return the class of this sensor, from SENSOR_CLASSES.\n\n<|skeleton|>\nclass MySensorsBinarySensor:\n \"\"\"Represent the value of a MySensors Binary Sensor child node.\"\"\"\n\n def is_on(self):\n \"\"\"Return True if the binary sensor is on.\"\"\"\n <|body_0|>\n\n def sensor_class(self):\n \"\"\"Return the class of this sensor, from SENSOR_CLASSES.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.value_type in self._values:\n return self._values[self.value_type] == STATE_ON\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n pres = self.gateway.const.Presentation\n class_map = {pres.S_DOOR: 'opening', pres.S_MOTION: 'motion', pres.S_SMOKE: 'smoke'}\n if float(self.gateway.protocol_version) >= 1.5:\n class_map.update({pres.S_SPRINKLER: 'sprinkler', pres.S_WATER_LEAK: 'leak', pres.S_SOUND: 'sound', pres.S_VIBRATION: 'vibration', pres.S_MOISTURE: 'moisture'})\n if class_map.get(self.child_type) in SENSOR_CLASSES:\n return class_map.get(self.child_type)\n<|end_body_1|>\n", "revision_id": "ca0e92aba83de2fd6cb1cc4d14f3b4471f17cf3d", "skeleton": "<|skeleton|>\nclass MySensorsBinarySensor:\n \"\"\"Represent the value of a MySensors Binary Sensor child node.\"\"\"\n\n def is_on(self):\n \"\"\"Return True if the binary sensor is on.\"\"\"\n <|body_0|>\n\n def sensor_class(self):\n \"\"\"Return the class of this sensor, from SENSOR_CLASSES.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MySensorsBinarySensor:\n \"\"\"Represent the value of a MySensors Binary Sensor child node.\"\"\"\n\n def is_on(self):\n \"\"\"Return True if the binary sensor is on.\"\"\"\n if self.value_type in self._values:\n return self._values[self.value_type] == STATE_ON\n return False\n\n def sensor_class(self):\n \"\"\"Return the class of this sensor, from SENSOR_CLASSES.\"\"\"\n pres = self.gateway.const.Presentation\n class_map = {pres.S_DOOR: 'opening', pres.S_MOTION: 'motion', pres.S_SMOKE: 'smoke'}\n if float(self.gateway.protocol_version) >= 1.5:\n class_map.update({pres.S_SPRINKLER: 'sprinkler', pres.S_WATER_LEAK: 'leak', pres.S_SOUND: 'sound', pres.S_VIBRATION: 'vibration', pres.S_MOISTURE: 'moisture'})\n if class_map.get(self.child_type) in SENSOR_CLASSES:\n return class_map.get(self.child_type)\n", "source": "the_stack_v2_python_sparse", "source_path": "homeassistant/components/binary_sensor/mysensors.py", "source_repo": "Smart-Torvy/torvy-home-assistant", "split": "test", "star_events_count": 2} {"blob_id": "d9007df6a3ea20c6591014f0df5c7994796f9da5", "bodies": ["thousand_g = info_dict.get('1000GAF')\nif thousand_g:\n logger.debug('Updating thousand_g to: {0}'.format(thousand_g))\n variant_obj.thousand_g = float(thousand_g)\n variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))", "for transcript in variant_obj.transcripts:\n gmaf_raw = transcript.GMAF\n if gmaf_raw:\n gmaf = float(gmaf_raw.split(':')[-1])\n variant_obj.add_frequency('GMAF', gmaf)\n if not variant_obj.thousand_g:\n variant_obj.thousand_g = gmaf", "exac = None\nexac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']\nfor key in exac_keys:\n if info_dict.get(key):\n exac = float(info_dict[key])\nif not exac:\n for transcript in variant_obj.transcripts:\n exac_raw = transcript.ExAC_MAF\n if exac_raw:\n exac = float(exac_raw.split(':')[-1])\nif exac:\n variant_obj.add_frequency('ExAC', exac)"], "bodies_text": "<|body_start_0|>\n thousand_g = info_dict.get('1000GAF')\n if thousand_g:\n logger.debug('Updating thousand_g to: {0}'.format(thousand_g))\n variant_obj.thousand_g = float(thousand_g)\n variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))\n<|end_body_0|>\n\n<|body_start_1|>\n for transcript in variant_obj.transcripts:\n gmaf_raw = transcript.GMAF\n if gmaf_raw:\n gmaf = float(gmaf_raw.split(':')[-1])\n variant_obj.add_frequency('GMAF', gmaf)\n if not variant_obj.thousand_g:\n variant_obj.thousand_g = gmaf\n<|end_body_1|>\n\n<|body_start_2|>\n exac = None\n exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']\n for key in exac_keys:\n if info_dict.get(key):\n exac = float(info_dict[key])\n if not exac:\n for transcript in variant_obj.transcripts:\n exac_raw = transcript.ExAC_MAF\n if exac_raw:\n exac = float(exac_raw.split(':')[-1])\n if exac:\n variant_obj.add_frequency('ExAC', exac)\n<|end_body_2|>\n", "class_docstring": "Methods for adding frequencies", "class_name": "FrequenciesExtras", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FrequenciesExtras:\n \"\"\"Methods for adding frequencies\"\"\"\n\n def _add_thousand_g(self, variant_obj, info_dict):\n \"\"\"Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_0|>\n\n def _add_gmaf(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_1|>\n\n def _add_exac(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n thousand_g = info_dict.get('1000GAF')\n if thousand_g:\n logger.debug('Updating thousand_g to: {0}'.format(thousand_g))\n variant_obj.thousand_g = float(thousand_g)\n variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))\n<|end_body_0|>\n\n<|body_start_1|>\n for transcript in variant_obj.transcripts:\n gmaf_raw = transcript.GMAF\n if gmaf_raw:\n gmaf = float(gmaf_raw.split(':')[-1])\n variant_obj.add_frequency('GMAF', gmaf)\n if not variant_obj.thousand_g:\n variant_obj.thousand_g = gmaf\n<|end_body_1|>\n\n<|body_start_2|>\n exac = None\n exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']\n for key in exac_keys:\n if info_dict.get(key):\n exac = float(info_dict[key])\n if not exac:\n for transcript in variant_obj.transcripts:\n exac_raw = transcript.ExAC_MAF\n if exac_raw:\n exac = float(exac_raw.split(':')[-1])\n if exac:\n variant_obj.add_frequency('ExAC', exac)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000279", "length_bytes": 2040, "license_type": "permissive", "methods": [{"docstring": "Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary", "name": "_add_thousand_g", "signature": "def _add_thousand_g(self, variant_obj, info_dict)"}, {"docstring": "Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary", "name": "_add_gmaf", "signature": "def _add_gmaf(self, variant_obj, info_dict)"}, {"docstring": "Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary", "name": "_add_exac", "signature": "def _add_exac(self, variant_obj, info_dict)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_025112", "prompt": "Implement the Python class `FrequenciesExtras` described below.\n\nClass description:\nMethods for adding frequencies\n\nMethod signatures and docstrings:\n- def _add_thousand_g(self, variant_obj, info_dict): Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\n- def _add_gmaf(self, variant_obj, info_dict): Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\n- def _add_exac(self, variant_obj, info_dict): Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary", "prompted_full_text": "Implement the Python class `FrequenciesExtras` described below.\n\nClass description:\nMethods for adding frequencies\n\nMethod signatures and docstrings:\n- def _add_thousand_g(self, variant_obj, info_dict): Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\n- def _add_gmaf(self, variant_obj, info_dict): Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\n- def _add_exac(self, variant_obj, info_dict): Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\n\n<|skeleton|>\nclass FrequenciesExtras:\n \"\"\"Methods for adding frequencies\"\"\"\n\n def _add_thousand_g(self, variant_obj, info_dict):\n \"\"\"Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_0|>\n\n def _add_gmaf(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_1|>\n\n def _add_exac(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n thousand_g = info_dict.get('1000GAF')\n if thousand_g:\n logger.debug('Updating thousand_g to: {0}'.format(thousand_g))\n variant_obj.thousand_g = float(thousand_g)\n variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))\n<|end_body_0|>\n\n<|body_start_1|>\n for transcript in variant_obj.transcripts:\n gmaf_raw = transcript.GMAF\n if gmaf_raw:\n gmaf = float(gmaf_raw.split(':')[-1])\n variant_obj.add_frequency('GMAF', gmaf)\n if not variant_obj.thousand_g:\n variant_obj.thousand_g = gmaf\n<|end_body_1|>\n\n<|body_start_2|>\n exac = None\n exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']\n for key in exac_keys:\n if info_dict.get(key):\n exac = float(info_dict[key])\n if not exac:\n for transcript in variant_obj.transcripts:\n exac_raw = transcript.ExAC_MAF\n if exac_raw:\n exac = float(exac_raw.split(':')[-1])\n if exac:\n variant_obj.add_frequency('ExAC', exac)\n<|end_body_2|>\n", "revision_id": "9476f05b416d3a5135d25492cb31411fdf831c58", "skeleton": "<|skeleton|>\nclass FrequenciesExtras:\n \"\"\"Methods for adding frequencies\"\"\"\n\n def _add_thousand_g(self, variant_obj, info_dict):\n \"\"\"Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_0|>\n\n def _add_gmaf(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_1|>\n\n def _add_exac(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FrequenciesExtras:\n \"\"\"Methods for adding frequencies\"\"\"\n\n def _add_thousand_g(self, variant_obj, info_dict):\n \"\"\"Add the thousand genomes frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n thousand_g = info_dict.get('1000GAF')\n if thousand_g:\n logger.debug('Updating thousand_g to: {0}'.format(thousand_g))\n variant_obj.thousand_g = float(thousand_g)\n variant_obj.add_frequency('1000GAF', variant_obj.get('thousand_g'))\n\n def _add_gmaf(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n for transcript in variant_obj.transcripts:\n gmaf_raw = transcript.GMAF\n if gmaf_raw:\n gmaf = float(gmaf_raw.split(':')[-1])\n variant_obj.add_frequency('GMAF', gmaf)\n if not variant_obj.thousand_g:\n variant_obj.thousand_g = gmaf\n\n def _add_exac(self, variant_obj, info_dict):\n \"\"\"Add the gmaf frequency Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary\"\"\"\n exac = None\n exac_keys = ['ExAC', 'EXAC', 'ExACAF', 'EXACAF']\n for key in exac_keys:\n if info_dict.get(key):\n exac = float(info_dict[key])\n if not exac:\n for transcript in variant_obj.transcripts:\n exac_raw = transcript.ExAC_MAF\n if exac_raw:\n exac = float(exac_raw.split(':')[-1])\n if exac:\n variant_obj.add_frequency('ExAC', exac)\n", "source": "the_stack_v2_python_sparse", "source_path": "puzzle/plugins/vcf/mixins/variant_extras/frequencies.py", "source_repo": "haoziyeung/puzzle", "split": "test", "star_events_count": 0} {"blob_id": "8055435cc00e83abe6be97f7a9599beff8535378", "bodies": ["start = end = 0\nlen_nums = len(nums)\nwhile start < len_nums and nums[start] != 0:\n start += 1\n end += 1\nwhile start < len_nums:\n while end < len_nums and nums[end] == 0:\n end += 1\n if end == len_nums:\n break\n nums[start] = nums[end]\n start += 1\n end += 1\nwhile start < len(nums):\n nums[start] = 0\n start += 1\nreturn nums", "fast = slow = 0\nwhile fast < len(nums):\n while slow < len(nums) and nums[slow] != 0:\n slow += 1\n fast += 1\n if fast < len(nums) and nums[fast] != 0:\n nums[fast], nums[slow] = (nums[slow], nums[fast])\n slow += 1\n fast += 1\nprint(nums)", "if len(nums) == 0:\n return\nlen_nums = len(nums) - 1\nstart = 0\nwhile start < len_nums:\n while start < len_nums and nums[start] != 0:\n start += 1\n index = start\n while index < len_nums and nums[index] == 0:\n index += 1\n if index == len_nums and nums[index] == 0:\n break\n if index > start:\n nums[start] = nums[index]\n nums[index] = 0\n start += 1\n index += 1\nprint(nums)\npass"], "bodies_text": "<|body_start_0|>\n start = end = 0\n len_nums = len(nums)\n while start < len_nums and nums[start] != 0:\n start += 1\n end += 1\n while start < len_nums:\n while end < len_nums and nums[end] == 0:\n end += 1\n if end == len_nums:\n break\n nums[start] = nums[end]\n start += 1\n end += 1\n while start < len(nums):\n nums[start] = 0\n start += 1\n return nums\n<|end_body_0|>\n\n<|body_start_1|>\n fast = slow = 0\n while fast < len(nums):\n while slow < len(nums) and nums[slow] != 0:\n slow += 1\n fast += 1\n if fast < len(nums) and nums[fast] != 0:\n nums[fast], nums[slow] = (nums[slow], nums[fast])\n slow += 1\n fast += 1\n print(nums)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(nums) == 0:\n return\n len_nums = len(nums) - 1\n start = 0\n while start < len_nums:\n while start < len_nums and nums[start] != 0:\n start += 1\n index = start\n while index < len_nums and nums[index] == 0:\n index += 1\n if index == len_nums and nums[index] == 0:\n break\n if index > start:\n nums[start] = nums[index]\n nums[index] = 0\n start += 1\n index += 1\n print(nums)\n pass\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"执行用时: 36 ms , 在所有 Python3 提交中击败了 90.63% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户 :param nums: :return:\"\"\"\n <|body_0|>\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"执行用时: 40 ms , 在所有 Python3 提交中击败了 77.26% 的用户 内存消耗: 15.3 MB , 在所有 Python3 提交中击败了 6.02% 的用户 :param nums: :return:\"\"\"\n <|body_1|>\n\n def moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"执行用时: 368 ms , 在所有 Python3 提交中击败了 5.54% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n start = end = 0\n len_nums = len(nums)\n while start < len_nums and nums[start] != 0:\n start += 1\n end += 1\n while start < len_nums:\n while end < len_nums and nums[end] == 0:\n end += 1\n if end == len_nums:\n break\n nums[start] = nums[end]\n start += 1\n end += 1\n while start < len(nums):\n nums[start] = 0\n start += 1\n return nums\n<|end_body_0|>\n\n<|body_start_1|>\n fast = slow = 0\n while fast < len(nums):\n while slow < len(nums) and nums[slow] != 0:\n slow += 1\n fast += 1\n if fast < len(nums) and nums[fast] != 0:\n nums[fast], nums[slow] = (nums[slow], nums[fast])\n slow += 1\n fast += 1\n print(nums)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(nums) == 0:\n return\n len_nums = len(nums) - 1\n start = 0\n while start < len_nums:\n while start < len_nums and nums[start] != 0:\n start += 1\n index = start\n while index < len_nums and nums[index] == 0:\n index += 1\n if index == len_nums and nums[index] == 0:\n break\n if index > start:\n nums[start] = nums[index]\n nums[index] = 0\n start += 1\n index += 1\n print(nums)\n pass\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000280", "length_bytes": 2634, "license_type": "no_license", "methods": [{"docstring": "执行用时: 36 ms , 在所有 Python3 提交中击败了 90.63% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户 :param nums: :return:", "name": "moveZeroes", "signature": "def moveZeroes(self, nums: List[int]) -> None"}, {"docstring": "执行用时: 40 ms , 在所有 Python3 提交中击败了 77.26% 的用户 内存消耗: 15.3 MB , 在所有 Python3 提交中击败了 6.02% 的用户 :param nums: :return:", "name": "moveZeroes2", "signature": "def moveZeroes2(self, nums: List[int]) -> None"}, {"docstring": "执行用时: 368 ms , 在所有 Python3 提交中击败了 5.54% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户", "name": "moveZeroes1", "signature": "def moveZeroes1(self, nums: List[int]) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_032591", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums: List[int]) -> None: 执行用时: 36 ms , 在所有 Python3 提交中击败了 90.63% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户 :param nums: :return:\n- def moveZeroes2(self, nums: List[int]) -> None: 执行用时: 40 ms , 在所有 Python3 提交中击败了 77.26% 的用户 内存消耗: 15.3 MB , 在所有 Python3 提交中击败了 6.02% 的用户 :param nums: :return:\n- def moveZeroes1(self, nums: List[int]) -> None: 执行用时: 368 ms , 在所有 Python3 提交中击败了 5.54% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes(self, nums: List[int]) -> None: 执行用时: 36 ms , 在所有 Python3 提交中击败了 90.63% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户 :param nums: :return:\n- def moveZeroes2(self, nums: List[int]) -> None: 执行用时: 40 ms , 在所有 Python3 提交中击败了 77.26% 的用户 内存消耗: 15.3 MB , 在所有 Python3 提交中击败了 6.02% 的用户 :param nums: :return:\n- def moveZeroes1(self, nums: List[int]) -> None: 执行用时: 368 ms , 在所有 Python3 提交中击败了 5.54% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户\n\n<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"执行用时: 36 ms , 在所有 Python3 提交中击败了 90.63% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户 :param nums: :return:\"\"\"\n <|body_0|>\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"执行用时: 40 ms , 在所有 Python3 提交中击败了 77.26% 的用户 内存消耗: 15.3 MB , 在所有 Python3 提交中击败了 6.02% 的用户 :param nums: :return:\"\"\"\n <|body_1|>\n\n def moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"执行用时: 368 ms , 在所有 Python3 提交中击败了 5.54% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n start = end = 0\n len_nums = len(nums)\n while start < len_nums and nums[start] != 0:\n start += 1\n end += 1\n while start < len_nums:\n while end < len_nums and nums[end] == 0:\n end += 1\n if end == len_nums:\n break\n nums[start] = nums[end]\n start += 1\n end += 1\n while start < len(nums):\n nums[start] = 0\n start += 1\n return nums\n<|end_body_0|>\n\n<|body_start_1|>\n fast = slow = 0\n while fast < len(nums):\n while slow < len(nums) and nums[slow] != 0:\n slow += 1\n fast += 1\n if fast < len(nums) and nums[fast] != 0:\n nums[fast], nums[slow] = (nums[slow], nums[fast])\n slow += 1\n fast += 1\n print(nums)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(nums) == 0:\n return\n len_nums = len(nums) - 1\n start = 0\n while start < len_nums:\n while start < len_nums and nums[start] != 0:\n start += 1\n index = start\n while index < len_nums and nums[index] == 0:\n index += 1\n if index == len_nums and nums[index] == 0:\n break\n if index > start:\n nums[start] = nums[index]\n nums[index] = 0\n start += 1\n index += 1\n print(nums)\n pass\n<|end_body_2|>\n", "revision_id": "d613ed8a5a2c15ace7d513965b372d128845d66a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"执行用时: 36 ms , 在所有 Python3 提交中击败了 90.63% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户 :param nums: :return:\"\"\"\n <|body_0|>\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"执行用时: 40 ms , 在所有 Python3 提交中击败了 77.26% 的用户 内存消耗: 15.3 MB , 在所有 Python3 提交中击败了 6.02% 的用户 :param nums: :return:\"\"\"\n <|body_1|>\n\n def moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"执行用时: 368 ms , 在所有 Python3 提交中击败了 5.54% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def moveZeroes(self, nums: List[int]) -> None:\n \"\"\"执行用时: 36 ms , 在所有 Python3 提交中击败了 90.63% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户 :param nums: :return:\"\"\"\n start = end = 0\n len_nums = len(nums)\n while start < len_nums and nums[start] != 0:\n start += 1\n end += 1\n while start < len_nums:\n while end < len_nums and nums[end] == 0:\n end += 1\n if end == len_nums:\n break\n nums[start] = nums[end]\n start += 1\n end += 1\n while start < len(nums):\n nums[start] = 0\n start += 1\n return nums\n\n def moveZeroes2(self, nums: List[int]) -> None:\n \"\"\"执行用时: 40 ms , 在所有 Python3 提交中击败了 77.26% 的用户 内存消耗: 15.3 MB , 在所有 Python3 提交中击败了 6.02% 的用户 :param nums: :return:\"\"\"\n fast = slow = 0\n while fast < len(nums):\n while slow < len(nums) and nums[slow] != 0:\n slow += 1\n fast += 1\n if fast < len(nums) and nums[fast] != 0:\n nums[fast], nums[slow] = (nums[slow], nums[fast])\n slow += 1\n fast += 1\n print(nums)\n\n def moveZeroes1(self, nums: List[int]) -> None:\n \"\"\"执行用时: 368 ms , 在所有 Python3 提交中击败了 5.54% 的用户 内存消耗: 15.2 MB , 在所有 Python3 提交中击败了 44.04% 的用户\"\"\"\n if len(nums) == 0:\n return\n len_nums = len(nums) - 1\n start = 0\n while start < len_nums:\n while start < len_nums and nums[start] != 0:\n start += 1\n index = start\n while index < len_nums and nums[index] == 0:\n index += 1\n if index == len_nums and nums[index] == 0:\n break\n if index > start:\n nums[start] = nums[index]\n nums[index] = 0\n start += 1\n index += 1\n print(nums)\n pass\n", "source": "the_stack_v2_python_sparse", "source_path": "move_zeroes.py", "source_repo": "nomboy/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "3656d5c3bcedf6b10c367f2212b152be9ed7c855", "bodies": ["self.__connection = pika.BlockingConnection(pika.ConnectionParameters(host=conf.RBMQ_HOST))\nself.__log = mylib.mylog(conf.AGENT_LOG)\nself.__channel = self.__connection.channel()\nself.__channel.exchange_declare(exchange=conf.EXCHANGE, type='fanout')\nres = self.__channel.queue_declare(durable=True)\nself.__queue_name = res.method.queue", "import subprocess\ntry:\n p = subprocess.Popen(commend, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n error = p.stderr.read()\n if not error:\n res = p.stdout.read()\n else:\n res = error\n res = str(res, 'utf8')\nexcept Exception as e:\n res = e\nprint(res)\nreturn '[%s]\\n%s' % (conf.AGENT_NAME, res)", "commend = body.decode()\nself.__log.info('run commend %s' % body.decode())\nresponse = self.__run_commend(commend)\nch.basic_publish(exchange='', routing_key=props.reply_to, properties=pika.BasicProperties(correlation_id=props.correlation_id), body=str(response))\nch.basic_ack(delivery_tag=method.delivery_tag)", "self.__channel.queue_bind(exchange=conf.EXCHANGE, queue=self.__queue_name)\nself.__channel.basic_consume(self.__on_request, queue=self.__queue_name)\nprint(' [x] Awaiting RPC requests')\nself.__channel.start_consuming()"], "bodies_text": "<|body_start_0|>\n self.__connection = pika.BlockingConnection(pika.ConnectionParameters(host=conf.RBMQ_HOST))\n self.__log = mylib.mylog(conf.AGENT_LOG)\n self.__channel = self.__connection.channel()\n self.__channel.exchange_declare(exchange=conf.EXCHANGE, type='fanout')\n res = self.__channel.queue_declare(durable=True)\n self.__queue_name = res.method.queue\n<|end_body_0|>\n\n<|body_start_1|>\n import subprocess\n try:\n p = subprocess.Popen(commend, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n error = p.stderr.read()\n if not error:\n res = p.stdout.read()\n else:\n res = error\n res = str(res, 'utf8')\n except Exception as e:\n res = e\n print(res)\n return '[%s]\\n%s' % (conf.AGENT_NAME, res)\n<|end_body_1|>\n\n<|body_start_2|>\n commend = body.decode()\n self.__log.info('run commend %s' % body.decode())\n response = self.__run_commend(commend)\n ch.basic_publish(exchange='', routing_key=props.reply_to, properties=pika.BasicProperties(correlation_id=props.correlation_id), body=str(response))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n<|end_body_2|>\n\n<|body_start_3|>\n self.__channel.queue_bind(exchange=conf.EXCHANGE, queue=self.__queue_name)\n self.__channel.basic_consume(self.__on_request, queue=self.__queue_name)\n print(' [x] Awaiting RPC requests')\n self.__channel.start_consuming()\n<|end_body_3|>\n", "class_docstring": "", "class_name": "rpcAgent", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass rpcAgent:\n\n def __init__(self):\n \"\"\"构造方法 :return: 无\"\"\"\n <|body_0|>\n\n def __run_commend(self, commend):\n \"\"\"运行命令方法 :param commend: 要执行的命令 :return: 返回命令执行的结果\"\"\"\n <|body_1|>\n\n def __on_request(self, ch, method, props, body):\n \"\"\"回调方法,当收到消息的时候将自动调用这个方法 :param ch: :param method: :param props: :param body: :return:\"\"\"\n <|body_2|>\n\n def run(self):\n \"\"\"agent入口方法 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__connection = pika.BlockingConnection(pika.ConnectionParameters(host=conf.RBMQ_HOST))\n self.__log = mylib.mylog(conf.AGENT_LOG)\n self.__channel = self.__connection.channel()\n self.__channel.exchange_declare(exchange=conf.EXCHANGE, type='fanout')\n res = self.__channel.queue_declare(durable=True)\n self.__queue_name = res.method.queue\n<|end_body_0|>\n\n<|body_start_1|>\n import subprocess\n try:\n p = subprocess.Popen(commend, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n error = p.stderr.read()\n if not error:\n res = p.stdout.read()\n else:\n res = error\n res = str(res, 'utf8')\n except Exception as e:\n res = e\n print(res)\n return '[%s]\\n%s' % (conf.AGENT_NAME, res)\n<|end_body_1|>\n\n<|body_start_2|>\n commend = body.decode()\n self.__log.info('run commend %s' % body.decode())\n response = self.__run_commend(commend)\n ch.basic_publish(exchange='', routing_key=props.reply_to, properties=pika.BasicProperties(correlation_id=props.correlation_id), body=str(response))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n<|end_body_2|>\n\n<|body_start_3|>\n self.__channel.queue_bind(exchange=conf.EXCHANGE, queue=self.__queue_name)\n self.__channel.basic_consume(self.__on_request, queue=self.__queue_name)\n print(' [x] Awaiting RPC requests')\n self.__channel.start_consuming()\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000281", "length_bytes": 2560, "license_type": "no_license", "methods": [{"docstring": "构造方法 :return: 无", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "运行命令方法 :param commend: 要执行的命令 :return: 返回命令执行的结果", "name": "__run_commend", "signature": "def __run_commend(self, commend)"}, {"docstring": "回调方法,当收到消息的时候将自动调用这个方法 :param ch: :param method: :param props: :param body: :return:", "name": "__on_request", "signature": "def __on_request(self, ch, method, props, body)"}, {"docstring": "agent入口方法 :return:", "name": "run", "signature": "def run(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_014322", "prompt": "Implement the Python class `rpcAgent` described below.\n\nClass description:\nImplement the rpcAgent class.\n\nMethod signatures and docstrings:\n- def __init__(self): 构造方法 :return: 无\n- def __run_commend(self, commend): 运行命令方法 :param commend: 要执行的命令 :return: 返回命令执行的结果\n- def __on_request(self, ch, method, props, body): 回调方法,当收到消息的时候将自动调用这个方法 :param ch: :param method: :param props: :param body: :return:\n- def run(self): agent入口方法 :return:", "prompted_full_text": "Implement the Python class `rpcAgent` described below.\n\nClass description:\nImplement the rpcAgent class.\n\nMethod signatures and docstrings:\n- def __init__(self): 构造方法 :return: 无\n- def __run_commend(self, commend): 运行命令方法 :param commend: 要执行的命令 :return: 返回命令执行的结果\n- def __on_request(self, ch, method, props, body): 回调方法,当收到消息的时候将自动调用这个方法 :param ch: :param method: :param props: :param body: :return:\n- def run(self): agent入口方法 :return:\n\n<|skeleton|>\nclass rpcAgent:\n\n def __init__(self):\n \"\"\"构造方法 :return: 无\"\"\"\n <|body_0|>\n\n def __run_commend(self, commend):\n \"\"\"运行命令方法 :param commend: 要执行的命令 :return: 返回命令执行的结果\"\"\"\n <|body_1|>\n\n def __on_request(self, ch, method, props, body):\n \"\"\"回调方法,当收到消息的时候将自动调用这个方法 :param ch: :param method: :param props: :param body: :return:\"\"\"\n <|body_2|>\n\n def run(self):\n \"\"\"agent入口方法 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__connection = pika.BlockingConnection(pika.ConnectionParameters(host=conf.RBMQ_HOST))\n self.__log = mylib.mylog(conf.AGENT_LOG)\n self.__channel = self.__connection.channel()\n self.__channel.exchange_declare(exchange=conf.EXCHANGE, type='fanout')\n res = self.__channel.queue_declare(durable=True)\n self.__queue_name = res.method.queue\n<|end_body_0|>\n\n<|body_start_1|>\n import subprocess\n try:\n p = subprocess.Popen(commend, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n error = p.stderr.read()\n if not error:\n res = p.stdout.read()\n else:\n res = error\n res = str(res, 'utf8')\n except Exception as e:\n res = e\n print(res)\n return '[%s]\\n%s' % (conf.AGENT_NAME, res)\n<|end_body_1|>\n\n<|body_start_2|>\n commend = body.decode()\n self.__log.info('run commend %s' % body.decode())\n response = self.__run_commend(commend)\n ch.basic_publish(exchange='', routing_key=props.reply_to, properties=pika.BasicProperties(correlation_id=props.correlation_id), body=str(response))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n<|end_body_2|>\n\n<|body_start_3|>\n self.__channel.queue_bind(exchange=conf.EXCHANGE, queue=self.__queue_name)\n self.__channel.basic_consume(self.__on_request, queue=self.__queue_name)\n print(' [x] Awaiting RPC requests')\n self.__channel.start_consuming()\n<|end_body_3|>\n", "revision_id": "c4bcad9e0b6e2d3dab89fb67f869c3102890724a", "skeleton": "<|skeleton|>\nclass rpcAgent:\n\n def __init__(self):\n \"\"\"构造方法 :return: 无\"\"\"\n <|body_0|>\n\n def __run_commend(self, commend):\n \"\"\"运行命令方法 :param commend: 要执行的命令 :return: 返回命令执行的结果\"\"\"\n <|body_1|>\n\n def __on_request(self, ch, method, props, body):\n \"\"\"回调方法,当收到消息的时候将自动调用这个方法 :param ch: :param method: :param props: :param body: :return:\"\"\"\n <|body_2|>\n\n def run(self):\n \"\"\"agent入口方法 :return:\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class rpcAgent:\n def __init__(self):\n \"\"\"构造方法 :return: 无\"\"\"\n self.__connection = pika.BlockingConnection(pika.ConnectionParameters(host=conf.RBMQ_HOST))\n self.__log = mylib.mylog(conf.AGENT_LOG)\n self.__channel = self.__connection.channel()\n self.__channel.exchange_declare(exchange=conf.EXCHANGE, type='fanout')\n res = self.__channel.queue_declare(durable=True)\n self.__queue_name = res.method.queue\n\n def __run_commend(self, commend):\n \"\"\"运行命令方法 :param commend: 要执行的命令 :return: 返回命令执行的结果\"\"\"\n import subprocess\n try:\n p = subprocess.Popen(commend, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n error = p.stderr.read()\n if not error:\n res = p.stdout.read()\n else:\n res = error\n res = str(res, 'utf8')\n except Exception as e:\n res = e\n print(res)\n return '[%s]\\n%s' % (conf.AGENT_NAME, res)\n\n def __on_request(self, ch, method, props, body):\n \"\"\"回调方法,当收到消息的时候将自动调用这个方法 :param ch: :param method: :param props: :param body: :return:\"\"\"\n commend = body.decode()\n self.__log.info('run commend %s' % body.decode())\n response = self.__run_commend(commend)\n ch.basic_publish(exchange='', routing_key=props.reply_to, properties=pika.BasicProperties(correlation_id=props.correlation_id), body=str(response))\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n def run(self):\n \"\"\"agent入口方法 :return:\"\"\"\n self.__channel.queue_bind(exchange=conf.EXCHANGE, queue=self.__queue_name)\n self.__channel.basic_consume(self.__on_request, queue=self.__queue_name)\n print(' [x] Awaiting RPC requests')\n self.__channel.start_consuming()\n", "source": "the_stack_v2_python_sparse", "source_path": "OldBoy_Python-master/day10/homework/RPC_framework/model/rpcAgent.py", "source_repo": "diji99/upl_learning", "split": "test", "star_events_count": 1} {"blob_id": "da45aafd2fde5d2469527eac3e59a99fe1c74229", "bodies": ["affirmative = 'This is a pythagorean triple.'\nself.affirmative = affirmative\nnegative = 'This is not a pythagorean triple.'\nself.negative = negative", "count = 0\nsides = []\nwhile count < 3:\n answers = float(input('Please enter the sides of the triangle: '))\n sides.append(answers)\n count += 1\nreturn sides", "call = Calculate()\nnum1 = sides[0] ** 2 + sides[1] ** 2\nnum2 = sides[2] ** 2\nnum3 = sides[2] ** 2 + sides[1] ** 2\nnum4 = sides[0] ** 2\nnum5 = sides[2] ** 2 + sides[0] ** 2\nnum6 = sides[1] ** 2\nif num1 == num2:\n print(call.affirmative)\nelif num3 == num4:\n print(call.affirmative)\nelif num5 == num6:\n print(call.affirmative)\nelse:\n print(call.negative)"], "bodies_text": "<|body_start_0|>\n affirmative = 'This is a pythagorean triple.'\n self.affirmative = affirmative\n negative = 'This is not a pythagorean triple.'\n self.negative = negative\n<|end_body_0|>\n\n<|body_start_1|>\n count = 0\n sides = []\n while count < 3:\n answers = float(input('Please enter the sides of the triangle: '))\n sides.append(answers)\n count += 1\n return sides\n<|end_body_1|>\n\n<|body_start_2|>\n call = Calculate()\n num1 = sides[0] ** 2 + sides[1] ** 2\n num2 = sides[2] ** 2\n num3 = sides[2] ** 2 + sides[1] ** 2\n num4 = sides[0] ** 2\n num5 = sides[2] ** 2 + sides[0] ** 2\n num6 = sides[1] ** 2\n if num1 == num2:\n print(call.affirmative)\n elif num3 == num4:\n print(call.affirmative)\n elif num5 == num6:\n print(call.affirmative)\n else:\n print(call.negative)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Calculate", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Calculate:\n\n def __init__(self):\n \"\"\"Description: This method contains attributes that provide responses. Args: None. Returns: affirmative, negative\"\"\"\n <|body_0|>\n\n def list_input(self):\n \"\"\"Description: This method prompts the user 3 times in order to get all the sides of the triangle. Args: None. Returns: sides\"\"\"\n <|body_1|>\n\n def pythag_check(self, sides):\n \"\"\"Description: This method utilizes a^2 + b^2 = c^2 to determine if any variant of sides entered is a pythagorean triple. Args: sides Returns: None.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n affirmative = 'This is a pythagorean triple.'\n self.affirmative = affirmative\n negative = 'This is not a pythagorean triple.'\n self.negative = negative\n<|end_body_0|>\n\n<|body_start_1|>\n count = 0\n sides = []\n while count < 3:\n answers = float(input('Please enter the sides of the triangle: '))\n sides.append(answers)\n count += 1\n return sides\n<|end_body_1|>\n\n<|body_start_2|>\n call = Calculate()\n num1 = sides[0] ** 2 + sides[1] ** 2\n num2 = sides[2] ** 2\n num3 = sides[2] ** 2 + sides[1] ** 2\n num4 = sides[0] ** 2\n num5 = sides[2] ** 2 + sides[0] ** 2\n num6 = sides[1] ** 2\n if num1 == num2:\n print(call.affirmative)\n elif num3 == num4:\n print(call.affirmative)\n elif num5 == num6:\n print(call.affirmative)\n else:\n print(call.negative)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000282", "length_bytes": 2265, "license_type": "permissive", "methods": [{"docstring": "Description: This method contains attributes that provide responses. Args: None. Returns: affirmative, negative", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Description: This method prompts the user 3 times in order to get all the sides of the triangle. Args: None. Returns: sides", "name": "list_input", "signature": "def list_input(self)"}, {"docstring": "Description: This method utilizes a^2 + b^2 = c^2 to determine if any variant of sides entered is a pythagorean triple. Args: sides Returns: None.", "name": "pythag_check", "signature": "def pythag_check(self, sides)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_044473", "prompt": "Implement the Python class `Calculate` described below.\n\nClass description:\nImplement the Calculate class.\n\nMethod signatures and docstrings:\n- def __init__(self): Description: This method contains attributes that provide responses. Args: None. Returns: affirmative, negative\n- def list_input(self): Description: This method prompts the user 3 times in order to get all the sides of the triangle. Args: None. Returns: sides\n- def pythag_check(self, sides): Description: This method utilizes a^2 + b^2 = c^2 to determine if any variant of sides entered is a pythagorean triple. Args: sides Returns: None.", "prompted_full_text": "Implement the Python class `Calculate` described below.\n\nClass description:\nImplement the Calculate class.\n\nMethod signatures and docstrings:\n- def __init__(self): Description: This method contains attributes that provide responses. Args: None. Returns: affirmative, negative\n- def list_input(self): Description: This method prompts the user 3 times in order to get all the sides of the triangle. Args: None. Returns: sides\n- def pythag_check(self, sides): Description: This method utilizes a^2 + b^2 = c^2 to determine if any variant of sides entered is a pythagorean triple. Args: sides Returns: None.\n\n<|skeleton|>\nclass Calculate:\n\n def __init__(self):\n \"\"\"Description: This method contains attributes that provide responses. Args: None. Returns: affirmative, negative\"\"\"\n <|body_0|>\n\n def list_input(self):\n \"\"\"Description: This method prompts the user 3 times in order to get all the sides of the triangle. Args: None. Returns: sides\"\"\"\n <|body_1|>\n\n def pythag_check(self, sides):\n \"\"\"Description: This method utilizes a^2 + b^2 = c^2 to determine if any variant of sides entered is a pythagorean triple. Args: sides Returns: None.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n affirmative = 'This is a pythagorean triple.'\n self.affirmative = affirmative\n negative = 'This is not a pythagorean triple.'\n self.negative = negative\n<|end_body_0|>\n\n<|body_start_1|>\n count = 0\n sides = []\n while count < 3:\n answers = float(input('Please enter the sides of the triangle: '))\n sides.append(answers)\n count += 1\n return sides\n<|end_body_1|>\n\n<|body_start_2|>\n call = Calculate()\n num1 = sides[0] ** 2 + sides[1] ** 2\n num2 = sides[2] ** 2\n num3 = sides[2] ** 2 + sides[1] ** 2\n num4 = sides[0] ** 2\n num5 = sides[2] ** 2 + sides[0] ** 2\n num6 = sides[1] ** 2\n if num1 == num2:\n print(call.affirmative)\n elif num3 == num4:\n print(call.affirmative)\n elif num5 == num6:\n print(call.affirmative)\n else:\n print(call.negative)\n<|end_body_2|>\n", "revision_id": "df46c8bb8e4c8ba6d34898cd13cdb0348eb4e74d", "skeleton": "<|skeleton|>\nclass Calculate:\n\n def __init__(self):\n \"\"\"Description: This method contains attributes that provide responses. Args: None. Returns: affirmative, negative\"\"\"\n <|body_0|>\n\n def list_input(self):\n \"\"\"Description: This method prompts the user 3 times in order to get all the sides of the triangle. Args: None. Returns: sides\"\"\"\n <|body_1|>\n\n def pythag_check(self, sides):\n \"\"\"Description: This method utilizes a^2 + b^2 = c^2 to determine if any variant of sides entered is a pythagorean triple. Args: sides Returns: None.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Calculate:\n def __init__(self):\n \"\"\"Description: This method contains attributes that provide responses. Args: None. Returns: affirmative, negative\"\"\"\n affirmative = 'This is a pythagorean triple.'\n self.affirmative = affirmative\n negative = 'This is not a pythagorean triple.'\n self.negative = negative\n\n def list_input(self):\n \"\"\"Description: This method prompts the user 3 times in order to get all the sides of the triangle. Args: None. Returns: sides\"\"\"\n count = 0\n sides = []\n while count < 3:\n answers = float(input('Please enter the sides of the triangle: '))\n sides.append(answers)\n count += 1\n return sides\n\n def pythag_check(self, sides):\n \"\"\"Description: This method utilizes a^2 + b^2 = c^2 to determine if any variant of sides entered is a pythagorean triple. Args: sides Returns: None.\"\"\"\n call = Calculate()\n num1 = sides[0] ** 2 + sides[1] ** 2\n num2 = sides[2] ** 2\n num3 = sides[2] ** 2 + sides[1] ** 2\n num4 = sides[0] ** 2\n num5 = sides[2] ** 2 + sides[0] ** 2\n num6 = sides[1] ** 2\n if num1 == num2:\n print(call.affirmative)\n elif num3 == num4:\n print(call.affirmative)\n elif num5 == num6:\n print(call.affirmative)\n else:\n print(call.negative)\n", "source": "the_stack_v2_python_sparse", "source_path": "pythagorean_triple.py", "source_repo": "tangowithfoxtrot/beginner_project_solutions", "split": "test", "star_events_count": 0} {"blob_id": "5d4e7e63b3a4bd50d4488ea5c646184fd99d384f", "bodies": ["self.mock_collection.find_one.return_value = None\nfor uuid, url in self.articles.iteritems():\n response = self.application.get(self.base_url + str(uuid))\n self.assertIn('404', response.status)", "for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 4, 12, 639560)}\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n self.assertIn('404', response.status)", "for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 16, 20, 77773), 'etag': 'bf6285d832a356e1bf509a63edc8870f', 'parsed_at': datetime.datetime(2013, 8, 4, 14, 16, 21, 77773), 'size': 31052, 'text_container_name': '44d85795', 'text_object_name': '248d-5899-b8ca-ac2bd8233755'}\n mock_object = self._get_attached_mock(self.mock_container.get_object)\n mock_object.fetch.return_value = 'Redacted for testing purposes'\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n mock_object.fetch.assert_called_once_with()\n self.assertIn('200', response.status)\n self.assertEqual('application/json', response.headers.get('Content-Type'))\n self.assertEqual('http://margarine.raxsavvy.com', response.headers.get('Access-Control-Allow-Origin'))"], "bodies_text": "<|body_start_0|>\n self.mock_collection.find_one.return_value = None\n for uuid, url in self.articles.iteritems():\n response = self.application.get(self.base_url + str(uuid))\n self.assertIn('404', response.status)\n<|end_body_0|>\n\n<|body_start_1|>\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 4, 12, 639560)}\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n self.assertIn('404', response.status)\n<|end_body_1|>\n\n<|body_start_2|>\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 16, 20, 77773), 'etag': 'bf6285d832a356e1bf509a63edc8870f', 'parsed_at': datetime.datetime(2013, 8, 4, 14, 16, 21, 77773), 'size': 31052, 'text_container_name': '44d85795', 'text_object_name': '248d-5899-b8ca-ac2bd8233755'}\n mock_object = self._get_attached_mock(self.mock_container.get_object)\n mock_object.fetch.return_value = 'Redacted for testing purposes'\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n mock_object.fetch.assert_called_once_with()\n self.assertIn('200', response.status)\n self.assertEqual('application/json', response.headers.get('Content-Type'))\n self.assertEqual('http://margarine.raxsavvy.com', response.headers.get('Access-Control-Allow-Origin'))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "BlendArticleReadTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BlendArticleReadTest:\n\n def test_article_read_unsubmitted(self):\n \"\"\"Blend::Article Read—Unsubmitted .. note:: The article in question has not been submitted and thus nothing exists in the system for the requested article.\"\"\"\n <|body_0|>\n\n def test_article_read_submitted_incomplete(self):\n \"\"\"Blend::Article Read—Submitted,Incomplete .. note:: The article in question has been submitted but the spread process has not populated any information beyond the first consumption. * created_at\"\"\"\n <|body_1|>\n\n def test_article_read_submitted_complete(self):\n \"\"\"Blend::Article Read—Submitted,Complete .. note:: The article in question has been submitted and the spread process has finished processing the following items: * HTML Sanitization\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mock_collection.find_one.return_value = None\n for uuid, url in self.articles.iteritems():\n response = self.application.get(self.base_url + str(uuid))\n self.assertIn('404', response.status)\n<|end_body_0|>\n\n<|body_start_1|>\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 4, 12, 639560)}\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n self.assertIn('404', response.status)\n<|end_body_1|>\n\n<|body_start_2|>\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 16, 20, 77773), 'etag': 'bf6285d832a356e1bf509a63edc8870f', 'parsed_at': datetime.datetime(2013, 8, 4, 14, 16, 21, 77773), 'size': 31052, 'text_container_name': '44d85795', 'text_object_name': '248d-5899-b8ca-ac2bd8233755'}\n mock_object = self._get_attached_mock(self.mock_container.get_object)\n mock_object.fetch.return_value = 'Redacted for testing purposes'\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n mock_object.fetch.assert_called_once_with()\n self.assertIn('200', response.status)\n self.assertEqual('application/json', response.headers.get('Content-Type'))\n self.assertEqual('http://margarine.raxsavvy.com', response.headers.get('Access-Control-Allow-Origin'))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000283", "length_bytes": 5838, "license_type": "no_license", "methods": [{"docstring": "Blend::Article Read—Unsubmitted .. note:: The article in question has not been submitted and thus nothing exists in the system for the requested article.", "name": "test_article_read_unsubmitted", "signature": "def test_article_read_unsubmitted(self)"}, {"docstring": "Blend::Article Read—Submitted,Incomplete .. note:: The article in question has been submitted but the spread process has not populated any information beyond the first consumption. * created_at", "name": "test_article_read_submitted_incomplete", "signature": "def test_article_read_submitted_incomplete(self)"}, {"docstring": "Blend::Article Read—Submitted,Complete .. note:: The article in question has been submitted and the spread process has finished processing the following items: * HTML Sanitization", "name": "test_article_read_submitted_complete", "signature": "def test_article_read_submitted_complete(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_052772", "prompt": "Implement the Python class `BlendArticleReadTest` described below.\n\nClass description:\nImplement the BlendArticleReadTest class.\n\nMethod signatures and docstrings:\n- def test_article_read_unsubmitted(self): Blend::Article Read—Unsubmitted .. note:: The article in question has not been submitted and thus nothing exists in the system for the requested article.\n- def test_article_read_submitted_incomplete(self): Blend::Article Read—Submitted,Incomplete .. note:: The article in question has been submitted but the spread process has not populated any information beyond the first consumption. * created_at\n- def test_article_read_submitted_complete(self): Blend::Article Read—Submitted,Complete .. note:: The article in question has been submitted and the spread process has finished processing the following items: * HTML Sanitization", "prompted_full_text": "Implement the Python class `BlendArticleReadTest` described below.\n\nClass description:\nImplement the BlendArticleReadTest class.\n\nMethod signatures and docstrings:\n- def test_article_read_unsubmitted(self): Blend::Article Read—Unsubmitted .. note:: The article in question has not been submitted and thus nothing exists in the system for the requested article.\n- def test_article_read_submitted_incomplete(self): Blend::Article Read—Submitted,Incomplete .. note:: The article in question has been submitted but the spread process has not populated any information beyond the first consumption. * created_at\n- def test_article_read_submitted_complete(self): Blend::Article Read—Submitted,Complete .. note:: The article in question has been submitted and the spread process has finished processing the following items: * HTML Sanitization\n\n<|skeleton|>\nclass BlendArticleReadTest:\n\n def test_article_read_unsubmitted(self):\n \"\"\"Blend::Article Read—Unsubmitted .. note:: The article in question has not been submitted and thus nothing exists in the system for the requested article.\"\"\"\n <|body_0|>\n\n def test_article_read_submitted_incomplete(self):\n \"\"\"Blend::Article Read—Submitted,Incomplete .. note:: The article in question has been submitted but the spread process has not populated any information beyond the first consumption. * created_at\"\"\"\n <|body_1|>\n\n def test_article_read_submitted_complete(self):\n \"\"\"Blend::Article Read—Submitted,Complete .. note:: The article in question has been submitted and the spread process has finished processing the following items: * HTML Sanitization\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.mock_collection.find_one.return_value = None\n for uuid, url in self.articles.iteritems():\n response = self.application.get(self.base_url + str(uuid))\n self.assertIn('404', response.status)\n<|end_body_0|>\n\n<|body_start_1|>\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 4, 12, 639560)}\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n self.assertIn('404', response.status)\n<|end_body_1|>\n\n<|body_start_2|>\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 16, 20, 77773), 'etag': 'bf6285d832a356e1bf509a63edc8870f', 'parsed_at': datetime.datetime(2013, 8, 4, 14, 16, 21, 77773), 'size': 31052, 'text_container_name': '44d85795', 'text_object_name': '248d-5899-b8ca-ac2bd8233755'}\n mock_object = self._get_attached_mock(self.mock_container.get_object)\n mock_object.fetch.return_value = 'Redacted for testing purposes'\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n mock_object.fetch.assert_called_once_with()\n self.assertIn('200', response.status)\n self.assertEqual('application/json', response.headers.get('Content-Type'))\n self.assertEqual('http://margarine.raxsavvy.com', response.headers.get('Access-Control-Allow-Origin'))\n<|end_body_2|>\n", "revision_id": "c909a8604bbcd2200b73284d94757c34490b5f0a", "skeleton": "<|skeleton|>\nclass BlendArticleReadTest:\n\n def test_article_read_unsubmitted(self):\n \"\"\"Blend::Article Read—Unsubmitted .. note:: The article in question has not been submitted and thus nothing exists in the system for the requested article.\"\"\"\n <|body_0|>\n\n def test_article_read_submitted_incomplete(self):\n \"\"\"Blend::Article Read—Submitted,Incomplete .. note:: The article in question has been submitted but the spread process has not populated any information beyond the first consumption. * created_at\"\"\"\n <|body_1|>\n\n def test_article_read_submitted_complete(self):\n \"\"\"Blend::Article Read—Submitted,Complete .. note:: The article in question has been submitted and the spread process has finished processing the following items: * HTML Sanitization\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BlendArticleReadTest:\n def test_article_read_unsubmitted(self):\n \"\"\"Blend::Article Read—Unsubmitted .. note:: The article in question has not been submitted and thus nothing exists in the system for the requested article.\"\"\"\n self.mock_collection.find_one.return_value = None\n for uuid, url in self.articles.iteritems():\n response = self.application.get(self.base_url + str(uuid))\n self.assertIn('404', response.status)\n\n def test_article_read_submitted_incomplete(self):\n \"\"\"Blend::Article Read—Submitted,Incomplete .. note:: The article in question has been submitted but the spread process has not populated any information beyond the first consumption. * created_at\"\"\"\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 4, 12, 639560)}\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n self.assertIn('404', response.status)\n\n def test_article_read_submitted_complete(self):\n \"\"\"Blend::Article Read—Submitted,Complete .. note:: The article in question has been submitted and the spread process has finished processing the following items: * HTML Sanitization\"\"\"\n for uuid, url in self.articles.iteritems():\n self.mock_collection.find_one.return_value = {'_id': uuid.hex, 'url': url, 'created_at': datetime.datetime(2013, 8, 4, 14, 16, 20, 77773), 'etag': 'bf6285d832a356e1bf509a63edc8870f', 'parsed_at': datetime.datetime(2013, 8, 4, 14, 16, 21, 77773), 'size': 31052, 'text_container_name': '44d85795', 'text_object_name': '248d-5899-b8ca-ac2bd8233755'}\n mock_object = self._get_attached_mock(self.mock_container.get_object)\n mock_object.fetch.return_value = 'Redacted for testing purposes'\n response = self.application.get(self.base_url + str(uuid))\n self.mock_collection.find_one.assert_called_once_with({'_id': uuid.hex})\n self.mock_collection.reset_mock()\n mock_object.fetch.assert_called_once_with()\n self.assertIn('200', response.status)\n self.assertEqual('application/json', response.headers.get('Content-Type'))\n self.assertEqual('http://margarine.raxsavvy.com', response.headers.get('Access-Control-Allow-Origin'))\n", "source": "the_stack_v2_python_sparse", "source_path": "test_margarine/test_unit/test_blend/test_article.py", "source_repo": "perniferuse/margarine", "split": "test", "star_events_count": 0} {"blob_id": "b81cd6b9f4dfa3b5af7737b07efbe773c1741608", "bodies": ["super(TRUNET_Encoder, self).__init__()\nself.encoder_params = encoder_params\nself.t_params = t_params\nself.layer_count = encoder_params['enc_layer_count']\nself.CGRU_Input_Layer = TRUNET_CGRU_Input_Layer(t_params, encoder_params['CGRUs_params'][0])\nself.CGRU_Attn_layers = []\nfor idx in range(encoder_params['attn_layers_count']):\n _layer = TRUNET_CGRU_Attention_Layer(t_params, encoder_params['CGRUs_params'][idx + 1], encoder_params['ATTN_params'][idx], encoder_params['ATTN_DOWNSCALING_params_enc'], encoder_params['seq_len_factor_reduction'][idx], self.encoder_params['attn_layers_num_of_splits'][idx], h_w, attn_ablation)\n self.CGRU_Attn_layers.append(_layer)", "hidden_state = self.CGRU_Input_Layer(_input, training)\nhidden_state = self.CGRU_Attn_layers[0](hidden_state, training=training)\nhidden_states = hidden_state\nfor idx in range(1, self.encoder_params['attn_layers_count']):\n hidden_state = self.CGRU_Attn_layers[idx](hidden_state, training=training)\n hidden_states = tf.concat([hidden_states, hidden_state], axis=1)\nreturn hidden_states"], "bodies_text": "<|body_start_0|>\n super(TRUNET_Encoder, self).__init__()\n self.encoder_params = encoder_params\n self.t_params = t_params\n self.layer_count = encoder_params['enc_layer_count']\n self.CGRU_Input_Layer = TRUNET_CGRU_Input_Layer(t_params, encoder_params['CGRUs_params'][0])\n self.CGRU_Attn_layers = []\n for idx in range(encoder_params['attn_layers_count']):\n _layer = TRUNET_CGRU_Attention_Layer(t_params, encoder_params['CGRUs_params'][idx + 1], encoder_params['ATTN_params'][idx], encoder_params['ATTN_DOWNSCALING_params_enc'], encoder_params['seq_len_factor_reduction'][idx], self.encoder_params['attn_layers_num_of_splits'][idx], h_w, attn_ablation)\n self.CGRU_Attn_layers.append(_layer)\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_state = self.CGRU_Input_Layer(_input, training)\n hidden_state = self.CGRU_Attn_layers[0](hidden_state, training=training)\n hidden_states = hidden_state\n for idx in range(1, self.encoder_params['attn_layers_count']):\n hidden_state = self.CGRU_Attn_layers[idx](hidden_state, training=training)\n hidden_states = tf.concat([hidden_states, hidden_state], axis=1)\n return hidden_states\n<|end_body_1|>\n", "class_docstring": "TRU-NET Encoder-Decoder Encoder", "class_name": "TRUNET_Encoder", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TRUNET_Encoder:\n \"\"\"TRU-NET Encoder-Decoder Encoder\"\"\"\n\n def __init__(self, t_params, encoder_params, h_w, attn_ablation=0):\n \"\"\"Args: t_params (dict): params related to training/testing encoder_params (dict): params related to encoder h_w ([type]): height and width of convolution output for this layer attn_ablation (int, optional): ablation mode for encoder layers. Defaults to 0 for cross attention 1 = Averaging 2 = Concatenation 3 = Last hidden state 4 = Self Attention\"\"\"\n <|body_0|>\n\n def call(self, _input, training=True):\n \"\"\"[summary] Args: _input ([type]): (batch_size, seq_len, h, w, c) training (bool, optional): [description]. Defaults to True. Returns: [type]: (batch_size, seq_len1, h1, w1, c1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TRUNET_Encoder, self).__init__()\n self.encoder_params = encoder_params\n self.t_params = t_params\n self.layer_count = encoder_params['enc_layer_count']\n self.CGRU_Input_Layer = TRUNET_CGRU_Input_Layer(t_params, encoder_params['CGRUs_params'][0])\n self.CGRU_Attn_layers = []\n for idx in range(encoder_params['attn_layers_count']):\n _layer = TRUNET_CGRU_Attention_Layer(t_params, encoder_params['CGRUs_params'][idx + 1], encoder_params['ATTN_params'][idx], encoder_params['ATTN_DOWNSCALING_params_enc'], encoder_params['seq_len_factor_reduction'][idx], self.encoder_params['attn_layers_num_of_splits'][idx], h_w, attn_ablation)\n self.CGRU_Attn_layers.append(_layer)\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_state = self.CGRU_Input_Layer(_input, training)\n hidden_state = self.CGRU_Attn_layers[0](hidden_state, training=training)\n hidden_states = hidden_state\n for idx in range(1, self.encoder_params['attn_layers_count']):\n hidden_state = self.CGRU_Attn_layers[idx](hidden_state, training=training)\n hidden_states = tf.concat([hidden_states, hidden_state], axis=1)\n return hidden_states\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000284", "length_bytes": 17462, "license_type": "permissive", "methods": [{"docstring": "Args: t_params (dict): params related to training/testing encoder_params (dict): params related to encoder h_w ([type]): height and width of convolution output for this layer attn_ablation (int, optional): ablation mode for encoder layers. Defaults to 0 for cross attention 1 = Averaging 2 = Concatenation 3 = Last hidden state 4 = Self Attention", "name": "__init__", "signature": "def __init__(self, t_params, encoder_params, h_w, attn_ablation=0)"}, {"docstring": "[summary] Args: _input ([type]): (batch_size, seq_len, h, w, c) training (bool, optional): [description]. Defaults to True. Returns: [type]: (batch_size, seq_len1, h1, w1, c1)", "name": "call", "signature": "def call(self, _input, training=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007944", "prompt": "Implement the Python class `TRUNET_Encoder` described below.\n\nClass description:\nTRU-NET Encoder-Decoder Encoder\n\nMethod signatures and docstrings:\n- def __init__(self, t_params, encoder_params, h_w, attn_ablation=0): Args: t_params (dict): params related to training/testing encoder_params (dict): params related to encoder h_w ([type]): height and width of convolution output for this layer attn_ablation (int, optional): ablation mode for encoder layers. Defaults to 0 for cross attention 1 = Averaging 2 = Concatenation 3 = Last hidden state 4 = Self Attention\n- def call(self, _input, training=True): [summary] Args: _input ([type]): (batch_size, seq_len, h, w, c) training (bool, optional): [description]. Defaults to True. Returns: [type]: (batch_size, seq_len1, h1, w1, c1)", "prompted_full_text": "Implement the Python class `TRUNET_Encoder` described below.\n\nClass description:\nTRU-NET Encoder-Decoder Encoder\n\nMethod signatures and docstrings:\n- def __init__(self, t_params, encoder_params, h_w, attn_ablation=0): Args: t_params (dict): params related to training/testing encoder_params (dict): params related to encoder h_w ([type]): height and width of convolution output for this layer attn_ablation (int, optional): ablation mode for encoder layers. Defaults to 0 for cross attention 1 = Averaging 2 = Concatenation 3 = Last hidden state 4 = Self Attention\n- def call(self, _input, training=True): [summary] Args: _input ([type]): (batch_size, seq_len, h, w, c) training (bool, optional): [description]. Defaults to True. Returns: [type]: (batch_size, seq_len1, h1, w1, c1)\n\n<|skeleton|>\nclass TRUNET_Encoder:\n \"\"\"TRU-NET Encoder-Decoder Encoder\"\"\"\n\n def __init__(self, t_params, encoder_params, h_w, attn_ablation=0):\n \"\"\"Args: t_params (dict): params related to training/testing encoder_params (dict): params related to encoder h_w ([type]): height and width of convolution output for this layer attn_ablation (int, optional): ablation mode for encoder layers. Defaults to 0 for cross attention 1 = Averaging 2 = Concatenation 3 = Last hidden state 4 = Self Attention\"\"\"\n <|body_0|>\n\n def call(self, _input, training=True):\n \"\"\"[summary] Args: _input ([type]): (batch_size, seq_len, h, w, c) training (bool, optional): [description]. Defaults to True. Returns: [type]: (batch_size, seq_len1, h1, w1, c1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(TRUNET_Encoder, self).__init__()\n self.encoder_params = encoder_params\n self.t_params = t_params\n self.layer_count = encoder_params['enc_layer_count']\n self.CGRU_Input_Layer = TRUNET_CGRU_Input_Layer(t_params, encoder_params['CGRUs_params'][0])\n self.CGRU_Attn_layers = []\n for idx in range(encoder_params['attn_layers_count']):\n _layer = TRUNET_CGRU_Attention_Layer(t_params, encoder_params['CGRUs_params'][idx + 1], encoder_params['ATTN_params'][idx], encoder_params['ATTN_DOWNSCALING_params_enc'], encoder_params['seq_len_factor_reduction'][idx], self.encoder_params['attn_layers_num_of_splits'][idx], h_w, attn_ablation)\n self.CGRU_Attn_layers.append(_layer)\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_state = self.CGRU_Input_Layer(_input, training)\n hidden_state = self.CGRU_Attn_layers[0](hidden_state, training=training)\n hidden_states = hidden_state\n for idx in range(1, self.encoder_params['attn_layers_count']):\n hidden_state = self.CGRU_Attn_layers[idx](hidden_state, training=training)\n hidden_states = tf.concat([hidden_states, hidden_state], axis=1)\n return hidden_states\n<|end_body_1|>\n", "revision_id": "12dff08f2361848e13b0952540e2198db386eab8", "skeleton": "<|skeleton|>\nclass TRUNET_Encoder:\n \"\"\"TRU-NET Encoder-Decoder Encoder\"\"\"\n\n def __init__(self, t_params, encoder_params, h_w, attn_ablation=0):\n \"\"\"Args: t_params (dict): params related to training/testing encoder_params (dict): params related to encoder h_w ([type]): height and width of convolution output for this layer attn_ablation (int, optional): ablation mode for encoder layers. Defaults to 0 for cross attention 1 = Averaging 2 = Concatenation 3 = Last hidden state 4 = Self Attention\"\"\"\n <|body_0|>\n\n def call(self, _input, training=True):\n \"\"\"[summary] Args: _input ([type]): (batch_size, seq_len, h, w, c) training (bool, optional): [description]. Defaults to True. Returns: [type]: (batch_size, seq_len1, h1, w1, c1)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TRUNET_Encoder:\n \"\"\"TRU-NET Encoder-Decoder Encoder\"\"\"\n\n def __init__(self, t_params, encoder_params, h_w, attn_ablation=0):\n \"\"\"Args: t_params (dict): params related to training/testing encoder_params (dict): params related to encoder h_w ([type]): height and width of convolution output for this layer attn_ablation (int, optional): ablation mode for encoder layers. Defaults to 0 for cross attention 1 = Averaging 2 = Concatenation 3 = Last hidden state 4 = Self Attention\"\"\"\n super(TRUNET_Encoder, self).__init__()\n self.encoder_params = encoder_params\n self.t_params = t_params\n self.layer_count = encoder_params['enc_layer_count']\n self.CGRU_Input_Layer = TRUNET_CGRU_Input_Layer(t_params, encoder_params['CGRUs_params'][0])\n self.CGRU_Attn_layers = []\n for idx in range(encoder_params['attn_layers_count']):\n _layer = TRUNET_CGRU_Attention_Layer(t_params, encoder_params['CGRUs_params'][idx + 1], encoder_params['ATTN_params'][idx], encoder_params['ATTN_DOWNSCALING_params_enc'], encoder_params['seq_len_factor_reduction'][idx], self.encoder_params['attn_layers_num_of_splits'][idx], h_w, attn_ablation)\n self.CGRU_Attn_layers.append(_layer)\n\n def call(self, _input, training=True):\n \"\"\"[summary] Args: _input ([type]): (batch_size, seq_len, h, w, c) training (bool, optional): [description]. Defaults to True. Returns: [type]: (batch_size, seq_len1, h1, w1, c1)\"\"\"\n hidden_state = self.CGRU_Input_Layer(_input, training)\n hidden_state = self.CGRU_Attn_layers[0](hidden_state, training=training)\n hidden_states = hidden_state\n for idx in range(1, self.encoder_params['attn_layers_count']):\n hidden_state = self.CGRU_Attn_layers[idx](hidden_state, training=training)\n hidden_states = tf.concat([hidden_states, hidden_state], axis=1)\n return hidden_states\n", "source": "the_stack_v2_python_sparse", "source_path": "layers.py", "source_repo": "siyuan-qx/TRUNET", "split": "test", "star_events_count": 0} {"blob_id": "6f405441aab3eba44ad2fa7ecf9cb7f24d2b8089", "bodies": ["key = jax.random.PRNGKey(1)\nshape = tuple(shape)\nreturn cls(n=0, mean=flax.nn.initializers.zeros(key, shape, dtype), mean_abs=flax.nn.initializers.zeros(key, shape, dtype), mean_sq=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_maximum=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_minimum=flax.nn.initializers.zeros(key, shape, dtype))", "if mask is None:\n mask = jnp.full(samples.shape, True)\nshape_utils.assert_shapes_compatible(samples.shape, mask.shape)\nmask = jnp.broadcast_to(mask, samples.shape)\nif exclude_zeros:\n mask = mask & (samples != 0)\n\ndef _moving_avg(old_avg, new_val, masked_reduction_fn):\n masked_new_val_reduced = masked_reduction_fn(new_val, mask=mask, axis=axis, paxis_name=paxis_name, keepdims=True)\n valid_mask = jnp.isfinite(masked_new_val_reduced)\n delta = jnp.where(valid_mask, masked_new_val_reduced - old_avg, 0)\n new_avg = old_avg + alpha * delta\n return new_avg\nnew_n = stats.n + 1\nif alpha is None:\n alpha = 1.0 / new_n\nnew_mean = _moving_avg(stats.mean, samples, masked_reduction_fn=masked_mean)\nnew_mean_abs = _moving_avg(stats.mean_abs, jnp.abs(samples), masked_reduction_fn=masked_mean)\nnew_mean_sq = _moving_avg(stats.mean_sq, jnp.square(samples), masked_reduction_fn=masked_mean)\nnew_mean_batch_minimum = _moving_avg(stats.mean_batch_minimum, samples, masked_reduction_fn=masked_mean_of_min)\nnew_mean_batch_maximum = _moving_avg(stats.mean_batch_maximum, samples, masked_reduction_fn=masked_mean_of_max)\nreturn cls(n=new_n, mean=new_mean, mean_abs=new_mean_abs, mean_sq=new_mean_sq, mean_batch_minimum=new_mean_batch_minimum, mean_batch_maximum=new_mean_batch_maximum)"], "bodies_text": "<|body_start_0|>\n key = jax.random.PRNGKey(1)\n shape = tuple(shape)\n return cls(n=0, mean=flax.nn.initializers.zeros(key, shape, dtype), mean_abs=flax.nn.initializers.zeros(key, shape, dtype), mean_sq=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_maximum=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_minimum=flax.nn.initializers.zeros(key, shape, dtype))\n<|end_body_0|>\n\n<|body_start_1|>\n if mask is None:\n mask = jnp.full(samples.shape, True)\n shape_utils.assert_shapes_compatible(samples.shape, mask.shape)\n mask = jnp.broadcast_to(mask, samples.shape)\n if exclude_zeros:\n mask = mask & (samples != 0)\n\n def _moving_avg(old_avg, new_val, masked_reduction_fn):\n masked_new_val_reduced = masked_reduction_fn(new_val, mask=mask, axis=axis, paxis_name=paxis_name, keepdims=True)\n valid_mask = jnp.isfinite(masked_new_val_reduced)\n delta = jnp.where(valid_mask, masked_new_val_reduced - old_avg, 0)\n new_avg = old_avg + alpha * delta\n return new_avg\n new_n = stats.n + 1\n if alpha is None:\n alpha = 1.0 / new_n\n new_mean = _moving_avg(stats.mean, samples, masked_reduction_fn=masked_mean)\n new_mean_abs = _moving_avg(stats.mean_abs, jnp.abs(samples), masked_reduction_fn=masked_mean)\n new_mean_sq = _moving_avg(stats.mean_sq, jnp.square(samples), masked_reduction_fn=masked_mean)\n new_mean_batch_minimum = _moving_avg(stats.mean_batch_minimum, samples, masked_reduction_fn=masked_mean_of_min)\n new_mean_batch_maximum = _moving_avg(stats.mean_batch_maximum, samples, masked_reduction_fn=masked_mean_of_max)\n return cls(n=new_n, mean=new_mean, mean_abs=new_mean_abs, mean_sq=new_mean_sq, mean_batch_minimum=new_mean_batch_minimum, mean_batch_maximum=new_mean_batch_maximum)\n<|end_body_1|>\n", "class_docstring": "Dataclass to keep track of statistics.", "class_name": "Stats", "detected_licenses": ["CC-BY-4.0", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Stats:\n \"\"\"Dataclass to keep track of statistics.\"\"\"\n\n def stats_initializer(cls, shape, *, dtype=jnp.float32):\n \"\"\"Constructor to init a empty Stats instance. Args: shape: shape of the statistics (mean, mean_sq and mean_abs) dtype: the dtype of the stats (default: float32). Returns: A new instance of Stats, with statistics intialized to 0.\"\"\"\n <|body_0|>\n\n def create_updated_stats(cls, stats, samples, *, axis=None, paxis_name=None, alpha=None, mask=None, exclude_zeros=False):\n \"\"\"Create a new Stats instance that represents the updated statistics. Since flax.struct.dataclass objects are frozen, this method creates a new instance of Stats with updated stats and returns it. Args: stats: A Stats dataclass object to be updated. samples: An array to update the current statistics with. axis: axis to average input samples over, e.g. to calculate stats per channel. paxis_name: the axis name used to combine batch statistics from multiple devices. See `jax.pmap` for a description of axis names. alpha: Smoothing parameter to use for moving average. If None, will use 1/n, where n is the stat count. mask: Optional boolean tensor of the same shape as 'samples' specifying which valu\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n key = jax.random.PRNGKey(1)\n shape = tuple(shape)\n return cls(n=0, mean=flax.nn.initializers.zeros(key, shape, dtype), mean_abs=flax.nn.initializers.zeros(key, shape, dtype), mean_sq=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_maximum=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_minimum=flax.nn.initializers.zeros(key, shape, dtype))\n<|end_body_0|>\n\n<|body_start_1|>\n if mask is None:\n mask = jnp.full(samples.shape, True)\n shape_utils.assert_shapes_compatible(samples.shape, mask.shape)\n mask = jnp.broadcast_to(mask, samples.shape)\n if exclude_zeros:\n mask = mask & (samples != 0)\n\n def _moving_avg(old_avg, new_val, masked_reduction_fn):\n masked_new_val_reduced = masked_reduction_fn(new_val, mask=mask, axis=axis, paxis_name=paxis_name, keepdims=True)\n valid_mask = jnp.isfinite(masked_new_val_reduced)\n delta = jnp.where(valid_mask, masked_new_val_reduced - old_avg, 0)\n new_avg = old_avg + alpha * delta\n return new_avg\n new_n = stats.n + 1\n if alpha is None:\n alpha = 1.0 / new_n\n new_mean = _moving_avg(stats.mean, samples, masked_reduction_fn=masked_mean)\n new_mean_abs = _moving_avg(stats.mean_abs, jnp.abs(samples), masked_reduction_fn=masked_mean)\n new_mean_sq = _moving_avg(stats.mean_sq, jnp.square(samples), masked_reduction_fn=masked_mean)\n new_mean_batch_minimum = _moving_avg(stats.mean_batch_minimum, samples, masked_reduction_fn=masked_mean_of_min)\n new_mean_batch_maximum = _moving_avg(stats.mean_batch_maximum, samples, masked_reduction_fn=masked_mean_of_max)\n return cls(n=new_n, mean=new_mean, mean_abs=new_mean_abs, mean_sq=new_mean_sq, mean_batch_minimum=new_mean_batch_minimum, mean_batch_maximum=new_mean_batch_maximum)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000285", "length_bytes": 9266, "license_type": "permissive", "methods": [{"docstring": "Constructor to init a empty Stats instance. Args: shape: shape of the statistics (mean, mean_sq and mean_abs) dtype: the dtype of the stats (default: float32). Returns: A new instance of Stats, with statistics intialized to 0.", "name": "stats_initializer", "signature": "def stats_initializer(cls, shape, *, dtype=jnp.float32)"}, {"docstring": "Create a new Stats instance that represents the updated statistics. Since flax.struct.dataclass objects are frozen, this method creates a new instance of Stats with updated stats and returns it. Args: stats: A Stats dataclass object to be updated. samples: An array to update the current statistics with. axis: axis to average input samples over, e.g. to calculate stats per channel. paxis_name: the axis name used to combine batch statistics from multiple devices. See `jax.pmap` for a description of axis names. alpha: Smoothing parameter to use for moving average. If None, will use 1/n, where n is the stat count. mask: Optional boolean tensor of the same shape as 'samples' specifying which valu", "name": "create_updated_stats", "signature": "def create_updated_stats(cls, stats, samples, *, axis=None, paxis_name=None, alpha=None, mask=None, exclude_zeros=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012899", "prompt": "Implement the Python class `Stats` described below.\n\nClass description:\nDataclass to keep track of statistics.\n\nMethod signatures and docstrings:\n- def stats_initializer(cls, shape, *, dtype=jnp.float32): Constructor to init a empty Stats instance. Args: shape: shape of the statistics (mean, mean_sq and mean_abs) dtype: the dtype of the stats (default: float32). Returns: A new instance of Stats, with statistics intialized to 0.\n- def create_updated_stats(cls, stats, samples, *, axis=None, paxis_name=None, alpha=None, mask=None, exclude_zeros=False): Create a new Stats instance that represents the updated statistics. Since flax.struct.dataclass objects are frozen, this method creates a new instance of Stats with updated stats and returns it. Args: stats: A Stats dataclass object to be updated. samples: An array to update the current statistics with. axis: axis to average input samples over, e.g. to calculate stats per channel. paxis_name: the axis name used to combine batch statistics from multiple devices. See `jax.pmap` for a description of axis names. alpha: Smoothing parameter to use for moving average. If None, will use 1/n, where n is the stat count. mask: Optional boolean tensor of the same shape as 'samples' specifying which valu", "prompted_full_text": "Implement the Python class `Stats` described below.\n\nClass description:\nDataclass to keep track of statistics.\n\nMethod signatures and docstrings:\n- def stats_initializer(cls, shape, *, dtype=jnp.float32): Constructor to init a empty Stats instance. Args: shape: shape of the statistics (mean, mean_sq and mean_abs) dtype: the dtype of the stats (default: float32). Returns: A new instance of Stats, with statistics intialized to 0.\n- def create_updated_stats(cls, stats, samples, *, axis=None, paxis_name=None, alpha=None, mask=None, exclude_zeros=False): Create a new Stats instance that represents the updated statistics. Since flax.struct.dataclass objects are frozen, this method creates a new instance of Stats with updated stats and returns it. Args: stats: A Stats dataclass object to be updated. samples: An array to update the current statistics with. axis: axis to average input samples over, e.g. to calculate stats per channel. paxis_name: the axis name used to combine batch statistics from multiple devices. See `jax.pmap` for a description of axis names. alpha: Smoothing parameter to use for moving average. If None, will use 1/n, where n is the stat count. mask: Optional boolean tensor of the same shape as 'samples' specifying which valu\n\n<|skeleton|>\nclass Stats:\n \"\"\"Dataclass to keep track of statistics.\"\"\"\n\n def stats_initializer(cls, shape, *, dtype=jnp.float32):\n \"\"\"Constructor to init a empty Stats instance. Args: shape: shape of the statistics (mean, mean_sq and mean_abs) dtype: the dtype of the stats (default: float32). Returns: A new instance of Stats, with statistics intialized to 0.\"\"\"\n <|body_0|>\n\n def create_updated_stats(cls, stats, samples, *, axis=None, paxis_name=None, alpha=None, mask=None, exclude_zeros=False):\n \"\"\"Create a new Stats instance that represents the updated statistics. Since flax.struct.dataclass objects are frozen, this method creates a new instance of Stats with updated stats and returns it. Args: stats: A Stats dataclass object to be updated. samples: An array to update the current statistics with. axis: axis to average input samples over, e.g. to calculate stats per channel. paxis_name: the axis name used to combine batch statistics from multiple devices. See `jax.pmap` for a description of axis names. alpha: Smoothing parameter to use for moving average. If None, will use 1/n, where n is the stat count. mask: Optional boolean tensor of the same shape as 'samples' specifying which valu\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n key = jax.random.PRNGKey(1)\n shape = tuple(shape)\n return cls(n=0, mean=flax.nn.initializers.zeros(key, shape, dtype), mean_abs=flax.nn.initializers.zeros(key, shape, dtype), mean_sq=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_maximum=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_minimum=flax.nn.initializers.zeros(key, shape, dtype))\n<|end_body_0|>\n\n<|body_start_1|>\n if mask is None:\n mask = jnp.full(samples.shape, True)\n shape_utils.assert_shapes_compatible(samples.shape, mask.shape)\n mask = jnp.broadcast_to(mask, samples.shape)\n if exclude_zeros:\n mask = mask & (samples != 0)\n\n def _moving_avg(old_avg, new_val, masked_reduction_fn):\n masked_new_val_reduced = masked_reduction_fn(new_val, mask=mask, axis=axis, paxis_name=paxis_name, keepdims=True)\n valid_mask = jnp.isfinite(masked_new_val_reduced)\n delta = jnp.where(valid_mask, masked_new_val_reduced - old_avg, 0)\n new_avg = old_avg + alpha * delta\n return new_avg\n new_n = stats.n + 1\n if alpha is None:\n alpha = 1.0 / new_n\n new_mean = _moving_avg(stats.mean, samples, masked_reduction_fn=masked_mean)\n new_mean_abs = _moving_avg(stats.mean_abs, jnp.abs(samples), masked_reduction_fn=masked_mean)\n new_mean_sq = _moving_avg(stats.mean_sq, jnp.square(samples), masked_reduction_fn=masked_mean)\n new_mean_batch_minimum = _moving_avg(stats.mean_batch_minimum, samples, masked_reduction_fn=masked_mean_of_min)\n new_mean_batch_maximum = _moving_avg(stats.mean_batch_maximum, samples, masked_reduction_fn=masked_mean_of_max)\n return cls(n=new_n, mean=new_mean, mean_abs=new_mean_abs, mean_sq=new_mean_sq, mean_batch_minimum=new_mean_batch_minimum, mean_batch_maximum=new_mean_batch_maximum)\n<|end_body_1|>\n", "revision_id": "320a49f768cea27200044c0d12f394aa6c795feb", "skeleton": "<|skeleton|>\nclass Stats:\n \"\"\"Dataclass to keep track of statistics.\"\"\"\n\n def stats_initializer(cls, shape, *, dtype=jnp.float32):\n \"\"\"Constructor to init a empty Stats instance. Args: shape: shape of the statistics (mean, mean_sq and mean_abs) dtype: the dtype of the stats (default: float32). Returns: A new instance of Stats, with statistics intialized to 0.\"\"\"\n <|body_0|>\n\n def create_updated_stats(cls, stats, samples, *, axis=None, paxis_name=None, alpha=None, mask=None, exclude_zeros=False):\n \"\"\"Create a new Stats instance that represents the updated statistics. Since flax.struct.dataclass objects are frozen, this method creates a new instance of Stats with updated stats and returns it. Args: stats: A Stats dataclass object to be updated. samples: An array to update the current statistics with. axis: axis to average input samples over, e.g. to calculate stats per channel. paxis_name: the axis name used to combine batch statistics from multiple devices. See `jax.pmap` for a description of axis names. alpha: Smoothing parameter to use for moving average. If None, will use 1/n, where n is the stat count. mask: Optional boolean tensor of the same shape as 'samples' specifying which valu\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Stats:\n \"\"\"Dataclass to keep track of statistics.\"\"\"\n\n def stats_initializer(cls, shape, *, dtype=jnp.float32):\n \"\"\"Constructor to init a empty Stats instance. Args: shape: shape of the statistics (mean, mean_sq and mean_abs) dtype: the dtype of the stats (default: float32). Returns: A new instance of Stats, with statistics intialized to 0.\"\"\"\n key = jax.random.PRNGKey(1)\n shape = tuple(shape)\n return cls(n=0, mean=flax.nn.initializers.zeros(key, shape, dtype), mean_abs=flax.nn.initializers.zeros(key, shape, dtype), mean_sq=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_maximum=flax.nn.initializers.zeros(key, shape, dtype), mean_batch_minimum=flax.nn.initializers.zeros(key, shape, dtype))\n\n def create_updated_stats(cls, stats, samples, *, axis=None, paxis_name=None, alpha=None, mask=None, exclude_zeros=False):\n \"\"\"Create a new Stats instance that represents the updated statistics. Since flax.struct.dataclass objects are frozen, this method creates a new instance of Stats with updated stats and returns it. Args: stats: A Stats dataclass object to be updated. samples: An array to update the current statistics with. axis: axis to average input samples over, e.g. to calculate stats per channel. paxis_name: the axis name used to combine batch statistics from multiple devices. See `jax.pmap` for a description of axis names. alpha: Smoothing parameter to use for moving average. If None, will use 1/n, where n is the stat count. mask: Optional boolean tensor of the same shape as 'samples' specifying which valu\"\"\"\n if mask is None:\n mask = jnp.full(samples.shape, True)\n shape_utils.assert_shapes_compatible(samples.shape, mask.shape)\n mask = jnp.broadcast_to(mask, samples.shape)\n if exclude_zeros:\n mask = mask & (samples != 0)\n\n def _moving_avg(old_avg, new_val, masked_reduction_fn):\n masked_new_val_reduced = masked_reduction_fn(new_val, mask=mask, axis=axis, paxis_name=paxis_name, keepdims=True)\n valid_mask = jnp.isfinite(masked_new_val_reduced)\n delta = jnp.where(valid_mask, masked_new_val_reduced - old_avg, 0)\n new_avg = old_avg + alpha * delta\n return new_avg\n new_n = stats.n + 1\n if alpha is None:\n alpha = 1.0 / new_n\n new_mean = _moving_avg(stats.mean, samples, masked_reduction_fn=masked_mean)\n new_mean_abs = _moving_avg(stats.mean_abs, jnp.abs(samples), masked_reduction_fn=masked_mean)\n new_mean_sq = _moving_avg(stats.mean_sq, jnp.square(samples), masked_reduction_fn=masked_mean)\n new_mean_batch_minimum = _moving_avg(stats.mean_batch_minimum, samples, masked_reduction_fn=masked_mean_of_min)\n new_mean_batch_maximum = _moving_avg(stats.mean_batch_maximum, samples, masked_reduction_fn=masked_mean_of_max)\n return cls(n=new_n, mean=new_mean, mean_abs=new_mean_abs, mean_sq=new_mean_sq, mean_batch_minimum=new_mean_batch_minimum, mean_batch_maximum=new_mean_batch_maximum)\n", "source": "the_stack_v2_python_sparse", "source_path": "aqt/jax/stats.py", "source_repo": "afcarl/google-research", "split": "test", "star_events_count": 1} {"blob_id": "adde843632c32fac214a5affba2b00cb98207b36", "bodies": ["test = '20 40 60 80 100\\n0 1 2 3 4\\n1 0'\nd = Forces(test)\nself.assertEqual(d.numw, [0, 1, 2, 3, 4])\nself.assertEqual(Forces(test).calculate(), '4900')\ntest = '119 119 119 119 119\\n0 0 0 0 0\\n10 0'\nself.assertEqual(Forces(test).calculate(), '4930')\ntest = ''\ntest = ''", "import random\nimport timeit\ntest = str(nmax) + ' ' + str(nmax) + '\\n'\nnumnums = [str(i) + ' ' + str(i + 1) for i in range(nmax)]\ntest += '\\n'.join(numnums) + '\\n'\nnums = [random.randint(1, 10000) for i in range(nmax)]\ntest += ' '.join(map(str, nums)) + '\\n'\nstart = timeit.default_timer()\nd = Forces(test)\ncalc = timeit.default_timer()\nd.calculate()\nstop = timeit.default_timer()\nprint('\\nTimelimit Test: ' + '{0:.3f}s (init {1:.3f}s calc {2:.3f}s)'.format(stop - start, calc - start, stop - calc))"], "bodies_text": "<|body_start_0|>\n test = '20 40 60 80 100\\n0 1 2 3 4\\n1 0'\n d = Forces(test)\n self.assertEqual(d.numw, [0, 1, 2, 3, 4])\n self.assertEqual(Forces(test).calculate(), '4900')\n test = '119 119 119 119 119\\n0 0 0 0 0\\n10 0'\n self.assertEqual(Forces(test).calculate(), '4930')\n test = ''\n test = ''\n<|end_body_0|>\n\n<|body_start_1|>\n import random\n import timeit\n test = str(nmax) + ' ' + str(nmax) + '\\n'\n numnums = [str(i) + ' ' + str(i + 1) for i in range(nmax)]\n test += '\\n'.join(numnums) + '\\n'\n nums = [random.randint(1, 10000) for i in range(nmax)]\n test += ' '.join(map(str, nums)) + '\\n'\n start = timeit.default_timer()\n d = Forces(test)\n calc = timeit.default_timer()\n d.calculate()\n stop = timeit.default_timer()\n print('\\nTimelimit Test: ' + '{0:.3f}s (init {1:.3f}s calc {2:.3f}s)'.format(stop - start, calc - start, stop - calc))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "unitTests", "detected_licenses": ["Unlicense", "LicenseRef-scancode-public-domain"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass unitTests:\n\n def test_single_test(self):\n \"\"\"Forces class testing\"\"\"\n <|body_0|>\n\n def time_limit_test(self, nmax):\n \"\"\"Timelimit testing\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test = '20 40 60 80 100\\n0 1 2 3 4\\n1 0'\n d = Forces(test)\n self.assertEqual(d.numw, [0, 1, 2, 3, 4])\n self.assertEqual(Forces(test).calculate(), '4900')\n test = '119 119 119 119 119\\n0 0 0 0 0\\n10 0'\n self.assertEqual(Forces(test).calculate(), '4930')\n test = ''\n test = ''\n<|end_body_0|>\n\n<|body_start_1|>\n import random\n import timeit\n test = str(nmax) + ' ' + str(nmax) + '\\n'\n numnums = [str(i) + ' ' + str(i + 1) for i in range(nmax)]\n test += '\\n'.join(numnums) + '\\n'\n nums = [random.randint(1, 10000) for i in range(nmax)]\n test += ' '.join(map(str, nums)) + '\\n'\n start = timeit.default_timer()\n d = Forces(test)\n calc = timeit.default_timer()\n d.calculate()\n stop = timeit.default_timer()\n print('\\nTimelimit Test: ' + '{0:.3f}s (init {1:.3f}s calc {2:.3f}s)'.format(stop - start, calc - start, stop - calc))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000286", "length_bytes": 3117, "license_type": "permissive", "methods": [{"docstring": "Forces class testing", "name": "test_single_test", "signature": "def test_single_test(self)"}, {"docstring": "Timelimit testing", "name": "time_limit_test", "signature": "def time_limit_test(self, nmax)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007100", "prompt": "Implement the Python class `unitTests` described below.\n\nClass description:\nImplement the unitTests class.\n\nMethod signatures and docstrings:\n- def test_single_test(self): Forces class testing\n- def time_limit_test(self, nmax): Timelimit testing", "prompted_full_text": "Implement the Python class `unitTests` described below.\n\nClass description:\nImplement the unitTests class.\n\nMethod signatures and docstrings:\n- def test_single_test(self): Forces class testing\n- def time_limit_test(self, nmax): Timelimit testing\n\n<|skeleton|>\nclass unitTests:\n\n def test_single_test(self):\n \"\"\"Forces class testing\"\"\"\n <|body_0|>\n\n def time_limit_test(self, nmax):\n \"\"\"Timelimit testing\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n test = '20 40 60 80 100\\n0 1 2 3 4\\n1 0'\n d = Forces(test)\n self.assertEqual(d.numw, [0, 1, 2, 3, 4])\n self.assertEqual(Forces(test).calculate(), '4900')\n test = '119 119 119 119 119\\n0 0 0 0 0\\n10 0'\n self.assertEqual(Forces(test).calculate(), '4930')\n test = ''\n test = ''\n<|end_body_0|>\n\n<|body_start_1|>\n import random\n import timeit\n test = str(nmax) + ' ' + str(nmax) + '\\n'\n numnums = [str(i) + ' ' + str(i + 1) for i in range(nmax)]\n test += '\\n'.join(numnums) + '\\n'\n nums = [random.randint(1, 10000) for i in range(nmax)]\n test += ' '.join(map(str, nums)) + '\\n'\n start = timeit.default_timer()\n d = Forces(test)\n calc = timeit.default_timer()\n d.calculate()\n stop = timeit.default_timer()\n print('\\nTimelimit Test: ' + '{0:.3f}s (init {1:.3f}s calc {2:.3f}s)'.format(stop - start, calc - start, stop - calc))\n<|end_body_1|>\n", "revision_id": "ae02ea872ca91ef98630cc172a844b82cc56f621", "skeleton": "<|skeleton|>\nclass unitTests:\n\n def test_single_test(self):\n \"\"\"Forces class testing\"\"\"\n <|body_0|>\n\n def time_limit_test(self, nmax):\n \"\"\"Timelimit testing\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class unitTests:\n def test_single_test(self):\n \"\"\"Forces class testing\"\"\"\n test = '20 40 60 80 100\\n0 1 2 3 4\\n1 0'\n d = Forces(test)\n self.assertEqual(d.numw, [0, 1, 2, 3, 4])\n self.assertEqual(Forces(test).calculate(), '4900')\n test = '119 119 119 119 119\\n0 0 0 0 0\\n10 0'\n self.assertEqual(Forces(test).calculate(), '4930')\n test = ''\n test = ''\n\n def time_limit_test(self, nmax):\n \"\"\"Timelimit testing\"\"\"\n import random\n import timeit\n test = str(nmax) + ' ' + str(nmax) + '\\n'\n numnums = [str(i) + ' ' + str(i + 1) for i in range(nmax)]\n test += '\\n'.join(numnums) + '\\n'\n nums = [random.randint(1, 10000) for i in range(nmax)]\n test += ' '.join(map(str, nums)) + '\\n'\n start = timeit.default_timer()\n d = Forces(test)\n calc = timeit.default_timer()\n d.calculate()\n stop = timeit.default_timer()\n print('\\nTimelimit Test: ' + '{0:.3f}s (init {1:.3f}s calc {2:.3f}s)'.format(stop - start, calc - start, stop - calc))\n", "source": "the_stack_v2_python_sparse", "source_path": "codeforces/604A_forces.py", "source_repo": "snsokolov/contests", "split": "test", "star_events_count": 1} {"blob_id": "1990c31f0e0209f72648e5dabe0c9cc1fb3835ef", "bodies": ["self.__parent = parent\nself.__children = [[] for _ in xrange(len(parent))]\nfor i, x in enumerate(parent):\n if x != -1:\n self.__children[x].append(i)\nself.__locked = {}", "if num in self.__locked:\n return False\nself.__locked[num] = user\nreturn True", "if self.__locked.get(num) != user:\n return False\ndel self.__locked[num]\nreturn True", "node = num\nwhile node != -1:\n if node in self.__locked:\n return False\n node = self.__parent[node]\nresult = False\nstk = [num]\nwhile stk:\n node = stk.pop()\n if node in self.__locked:\n del self.__locked[node]\n result = True\n for child in self.__children[node]:\n stk.append(child)\nif result:\n self.__locked[num] = user\nreturn result"], "bodies_text": "<|body_start_0|>\n self.__parent = parent\n self.__children = [[] for _ in xrange(len(parent))]\n for i, x in enumerate(parent):\n if x != -1:\n self.__children[x].append(i)\n self.__locked = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if num in self.__locked:\n return False\n self.__locked[num] = user\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self.__locked.get(num) != user:\n return False\n del self.__locked[num]\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n node = num\n while node != -1:\n if node in self.__locked:\n return False\n node = self.__parent[node]\n result = False\n stk = [num]\n while stk:\n node = stk.pop()\n if node in self.__locked:\n del self.__locked[node]\n result = True\n for child in self.__children[node]:\n stk.append(child)\n if result:\n self.__locked[num] = user\n return result\n<|end_body_3|>\n", "class_docstring": "", "class_name": "LockingTree", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LockingTree:\n\n def __init__(self, parent):\n \"\"\":type parent: List[int]\"\"\"\n <|body_0|>\n\n def lock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_1|>\n\n def unlock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_2|>\n\n def upgrade(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__parent = parent\n self.__children = [[] for _ in xrange(len(parent))]\n for i, x in enumerate(parent):\n if x != -1:\n self.__children[x].append(i)\n self.__locked = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if num in self.__locked:\n return False\n self.__locked[num] = user\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self.__locked.get(num) != user:\n return False\n del self.__locked[num]\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n node = num\n while node != -1:\n if node in self.__locked:\n return False\n node = self.__parent[node]\n result = False\n stk = [num]\n while stk:\n node = stk.pop()\n if node in self.__locked:\n del self.__locked[node]\n result = True\n for child in self.__children[node]:\n stk.append(child)\n if result:\n self.__locked[num] = user\n return result\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000287", "length_bytes": 1568, "license_type": "permissive", "methods": [{"docstring": ":type parent: List[int]", "name": "__init__", "signature": "def __init__(self, parent)"}, {"docstring": ":type num: int :type user: int :rtype: bool", "name": "lock", "signature": "def lock(self, num, user)"}, {"docstring": ":type num: int :type user: int :rtype: bool", "name": "unlock", "signature": "def unlock(self, num, user)"}, {"docstring": ":type num: int :type user: int :rtype: bool", "name": "upgrade", "signature": "def upgrade(self, num, user)"}], "n_methods": 4, "prompt": "Implement the Python class `LockingTree` described below.\n\nClass description:\nImplement the LockingTree class.\n\nMethod signatures and docstrings:\n- def __init__(self, parent): :type parent: List[int]\n- def lock(self, num, user): :type num: int :type user: int :rtype: bool\n- def unlock(self, num, user): :type num: int :type user: int :rtype: bool\n- def upgrade(self, num, user): :type num: int :type user: int :rtype: bool", "prompted_full_text": "Implement the Python class `LockingTree` described below.\n\nClass description:\nImplement the LockingTree class.\n\nMethod signatures and docstrings:\n- def __init__(self, parent): :type parent: List[int]\n- def lock(self, num, user): :type num: int :type user: int :rtype: bool\n- def unlock(self, num, user): :type num: int :type user: int :rtype: bool\n- def upgrade(self, num, user): :type num: int :type user: int :rtype: bool\n\n<|skeleton|>\nclass LockingTree:\n\n def __init__(self, parent):\n \"\"\":type parent: List[int]\"\"\"\n <|body_0|>\n\n def lock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_1|>\n\n def unlock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_2|>\n\n def upgrade(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.__parent = parent\n self.__children = [[] for _ in xrange(len(parent))]\n for i, x in enumerate(parent):\n if x != -1:\n self.__children[x].append(i)\n self.__locked = {}\n<|end_body_0|>\n\n<|body_start_1|>\n if num in self.__locked:\n return False\n self.__locked[num] = user\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n if self.__locked.get(num) != user:\n return False\n del self.__locked[num]\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n node = num\n while node != -1:\n if node in self.__locked:\n return False\n node = self.__parent[node]\n result = False\n stk = [num]\n while stk:\n node = stk.pop()\n if node in self.__locked:\n del self.__locked[node]\n result = True\n for child in self.__children[node]:\n stk.append(child)\n if result:\n self.__locked[num] = user\n return result\n<|end_body_3|>\n", "revision_id": "4dc4e6642dc92f1983c13564cc0fd99917cab358", "skeleton": "<|skeleton|>\nclass LockingTree:\n\n def __init__(self, parent):\n \"\"\":type parent: List[int]\"\"\"\n <|body_0|>\n\n def lock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_1|>\n\n def unlock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_2|>\n\n def upgrade(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LockingTree:\n def __init__(self, parent):\n \"\"\":type parent: List[int]\"\"\"\n self.__parent = parent\n self.__children = [[] for _ in xrange(len(parent))]\n for i, x in enumerate(parent):\n if x != -1:\n self.__children[x].append(i)\n self.__locked = {}\n\n def lock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n if num in self.__locked:\n return False\n self.__locked[num] = user\n return True\n\n def unlock(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n if self.__locked.get(num) != user:\n return False\n del self.__locked[num]\n return True\n\n def upgrade(self, num, user):\n \"\"\":type num: int :type user: int :rtype: bool\"\"\"\n node = num\n while node != -1:\n if node in self.__locked:\n return False\n node = self.__parent[node]\n result = False\n stk = [num]\n while stk:\n node = stk.pop()\n if node in self.__locked:\n del self.__locked[node]\n result = True\n for child in self.__children[node]:\n stk.append(child)\n if result:\n self.__locked[num] = user\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "Python/operations-on-tree.py", "source_repo": "kamyu104/LeetCode-Solutions", "split": "test", "star_events_count": 4549} {"blob_id": "7b5e9b9159e5f29d935d4040d09b9a088e88c404", "bodies": ["try:\n permission = UserPermissions.objects.get(id=kwargs['permission_id'], repository_id=kwargs['repository_id'])\nexcept UserPermissions.DoesNotExist:\n raise Http404\nsuper(PermissionDetail, self).check_permissions(request)\nreturn permission", "permission = self.get_object(request, **kwargs)\nserializer_context = {'request': Request(request), 'repository_id': kwargs['repository_id']}\nserializer = PermissionSerializer(permission, context=serializer_context)\nreturn Response(serializer.data)", "permission = self.get_object(request, **kwargs)\nsuper(PermissionDetail, self).check_object_permissions(request, permission)\npermission.delete()\nreturn Response(status=status.HTTP_204_NO_CONTENT)"], "bodies_text": "<|body_start_0|>\n try:\n permission = UserPermissions.objects.get(id=kwargs['permission_id'], repository_id=kwargs['repository_id'])\n except UserPermissions.DoesNotExist:\n raise Http404\n super(PermissionDetail, self).check_permissions(request)\n return permission\n<|end_body_0|>\n\n<|body_start_1|>\n permission = self.get_object(request, **kwargs)\n serializer_context = {'request': Request(request), 'repository_id': kwargs['repository_id']}\n serializer = PermissionSerializer(permission, context=serializer_context)\n return Response(serializer.data)\n<|end_body_1|>\n\n<|body_start_2|>\n permission = self.get_object(request, **kwargs)\n super(PermissionDetail, self).check_object_permissions(request, permission)\n permission.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_2|>\n", "class_docstring": "This view handle all requests what comes on endpoint repositories/(?P[0-9]+)/permissions/(?P[0-9]+)/$", "class_name": "PermissionDetail", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PermissionDetail:\n \"\"\"This view handle all requests what comes on endpoint repositories/(?P[0-9]+)/permissions/(?P[0-9]+)/$\"\"\"\n\n def get_object(self, request, *args, **kwargs):\n \"\"\"Trying to get permission(permission and repository id) in database and return them :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: UserPermissions object or DoesNotExist exception :raise UserPermissions.DoesNotExist\"\"\"\n <|body_0|>\n\n def get(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle GET request return JSON with detailed information for specific permission :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: HTTP response with serialized in JSON permission data\"\"\"\n <|body_1|>\n\n def delete(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle DELETE http request on permission detail endpoint :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: on success HTTP 204 status code, else return 404\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n permission = UserPermissions.objects.get(id=kwargs['permission_id'], repository_id=kwargs['repository_id'])\n except UserPermissions.DoesNotExist:\n raise Http404\n super(PermissionDetail, self).check_permissions(request)\n return permission\n<|end_body_0|>\n\n<|body_start_1|>\n permission = self.get_object(request, **kwargs)\n serializer_context = {'request': Request(request), 'repository_id': kwargs['repository_id']}\n serializer = PermissionSerializer(permission, context=serializer_context)\n return Response(serializer.data)\n<|end_body_1|>\n\n<|body_start_2|>\n permission = self.get_object(request, **kwargs)\n super(PermissionDetail, self).check_object_permissions(request, permission)\n permission.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000288", "length_bytes": 4874, "license_type": "permissive", "methods": [{"docstring": "Trying to get permission(permission and repository id) in database and return them :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: UserPermissions object or DoesNotExist exception :raise UserPermissions.DoesNotExist", "name": "get_object", "signature": "def get_object(self, request, *args, **kwargs)"}, {"docstring": "This method handle GET request return JSON with detailed information for specific permission :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: HTTP response with serialized in JSON permission data", "name": "get", "signature": "def get(self, request: Request, *args, **kwargs: dict) -> Response"}, {"docstring": "This method handle DELETE http request on permission detail endpoint :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: on success HTTP 204 status code, else return 404", "name": "delete", "signature": "def delete(self, request: Request, *args, **kwargs: dict) -> Response"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_053883", "prompt": "Implement the Python class `PermissionDetail` described below.\n\nClass description:\nThis view handle all requests what comes on endpoint repositories/(?P[0-9]+)/permissions/(?P[0-9]+)/$\n\nMethod signatures and docstrings:\n- def get_object(self, request, *args, **kwargs): Trying to get permission(permission and repository id) in database and return them :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: UserPermissions object or DoesNotExist exception :raise UserPermissions.DoesNotExist\n- def get(self, request: Request, *args, **kwargs: dict) -> Response: This method handle GET request return JSON with detailed information for specific permission :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: HTTP response with serialized in JSON permission data\n- def delete(self, request: Request, *args, **kwargs: dict) -> Response: This method handle DELETE http request on permission detail endpoint :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: on success HTTP 204 status code, else return 404", "prompted_full_text": "Implement the Python class `PermissionDetail` described below.\n\nClass description:\nThis view handle all requests what comes on endpoint repositories/(?P[0-9]+)/permissions/(?P[0-9]+)/$\n\nMethod signatures and docstrings:\n- def get_object(self, request, *args, **kwargs): Trying to get permission(permission and repository id) in database and return them :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: UserPermissions object or DoesNotExist exception :raise UserPermissions.DoesNotExist\n- def get(self, request: Request, *args, **kwargs: dict) -> Response: This method handle GET request return JSON with detailed information for specific permission :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: HTTP response with serialized in JSON permission data\n- def delete(self, request: Request, *args, **kwargs: dict) -> Response: This method handle DELETE http request on permission detail endpoint :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: on success HTTP 204 status code, else return 404\n\n<|skeleton|>\nclass PermissionDetail:\n \"\"\"This view handle all requests what comes on endpoint repositories/(?P[0-9]+)/permissions/(?P[0-9]+)/$\"\"\"\n\n def get_object(self, request, *args, **kwargs):\n \"\"\"Trying to get permission(permission and repository id) in database and return them :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: UserPermissions object or DoesNotExist exception :raise UserPermissions.DoesNotExist\"\"\"\n <|body_0|>\n\n def get(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle GET request return JSON with detailed information for specific permission :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: HTTP response with serialized in JSON permission data\"\"\"\n <|body_1|>\n\n def delete(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle DELETE http request on permission detail endpoint :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: on success HTTP 204 status code, else return 404\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n permission = UserPermissions.objects.get(id=kwargs['permission_id'], repository_id=kwargs['repository_id'])\n except UserPermissions.DoesNotExist:\n raise Http404\n super(PermissionDetail, self).check_permissions(request)\n return permission\n<|end_body_0|>\n\n<|body_start_1|>\n permission = self.get_object(request, **kwargs)\n serializer_context = {'request': Request(request), 'repository_id': kwargs['repository_id']}\n serializer = PermissionSerializer(permission, context=serializer_context)\n return Response(serializer.data)\n<|end_body_1|>\n\n<|body_start_2|>\n permission = self.get_object(request, **kwargs)\n super(PermissionDetail, self).check_object_permissions(request, permission)\n permission.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n<|end_body_2|>\n", "revision_id": "fdb911dfafbd2609b7f96561ab6780b4131a77bd", "skeleton": "<|skeleton|>\nclass PermissionDetail:\n \"\"\"This view handle all requests what comes on endpoint repositories/(?P[0-9]+)/permissions/(?P[0-9]+)/$\"\"\"\n\n def get_object(self, request, *args, **kwargs):\n \"\"\"Trying to get permission(permission and repository id) in database and return them :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: UserPermissions object or DoesNotExist exception :raise UserPermissions.DoesNotExist\"\"\"\n <|body_0|>\n\n def get(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle GET request return JSON with detailed information for specific permission :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: HTTP response with serialized in JSON permission data\"\"\"\n <|body_1|>\n\n def delete(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle DELETE http request on permission detail endpoint :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: on success HTTP 204 status code, else return 404\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PermissionDetail:\n \"\"\"This view handle all requests what comes on endpoint repositories/(?P[0-9]+)/permissions/(?P[0-9]+)/$\"\"\"\n\n def get_object(self, request, *args, **kwargs):\n \"\"\"Trying to get permission(permission and repository id) in database and return them :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: UserPermissions object or DoesNotExist exception :raise UserPermissions.DoesNotExist\"\"\"\n try:\n permission = UserPermissions.objects.get(id=kwargs['permission_id'], repository_id=kwargs['repository_id'])\n except UserPermissions.DoesNotExist:\n raise Http404\n super(PermissionDetail, self).check_permissions(request)\n return permission\n\n def get(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle GET request return JSON with detailed information for specific permission :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: HTTP response with serialized in JSON permission data\"\"\"\n permission = self.get_object(request, **kwargs)\n serializer_context = {'request': Request(request), 'repository_id': kwargs['repository_id']}\n serializer = PermissionSerializer(permission, context=serializer_context)\n return Response(serializer.data)\n\n def delete(self, request: Request, *args, **kwargs: dict) -> Response:\n \"\"\"This method handle DELETE http request on permission detail endpoint :param request: http request :param args: other parameters :param kwargs: dict parsed url variables {\"permission_id\": \"id\", \"repository_id\":\"id\"} :return: on success HTTP 204 status code, else return 404\"\"\"\n permission = self.get_object(request, **kwargs)\n super(PermissionDetail, self).check_object_permissions(request, permission)\n permission.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n", "source": "the_stack_v2_python_sparse", "source_path": "permissions/views.py", "source_repo": "Kh-011-WebUIPython/lit", "split": "test", "star_events_count": 4} {"blob_id": "73a4d3ad76877d05c40b2405b19c9bb27d39b183", "bodies": ["if 0 <= amount <= 99999999:\n self.summary.record_deposit(account, amount)\nelse:\n raise ValueError('Invalid transaction value for agent mode')", "if 0 <= amount <= 99999999:\n self.summary.record_withdraw(account, amount)\nelse:\n raise ValueError('Invalid transaction value for agent mode')", "if 0 <= amount <= 99999999:\n self.summary.record_transfer(account1, account2, amount)\nelse:\n raise ValueError('Invalid transaction value for agent mode')", "if account in self._new_accounts:\n return True\nelse:\n return False", "if not (self.accounts.is_account_active(account) or self.is_account_new(account)):\n self._new_accounts.append(int(account))\n self.summary.record_create_account(account, name)\nelse:\n raise ValueError('Account {} already exists'.format(account))", "if self.accounts.is_account_active(account):\n self.accounts.delete_account(account)\n self._deleted_accounts.append(account)\n self.summary.record_delete_account(account, name)\nelse:\n raise ValueError('Account {} is not active, it cannot be deleted'.format(account))"], "bodies_text": "<|body_start_0|>\n if 0 <= amount <= 99999999:\n self.summary.record_deposit(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_0|>\n\n<|body_start_1|>\n if 0 <= amount <= 99999999:\n self.summary.record_withdraw(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_1|>\n\n<|body_start_2|>\n if 0 <= amount <= 99999999:\n self.summary.record_transfer(account1, account2, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_2|>\n\n<|body_start_3|>\n if account in self._new_accounts:\n return True\n else:\n return False\n<|end_body_3|>\n\n<|body_start_4|>\n if not (self.accounts.is_account_active(account) or self.is_account_new(account)):\n self._new_accounts.append(int(account))\n self.summary.record_create_account(account, name)\n else:\n raise ValueError('Account {} already exists'.format(account))\n<|end_body_4|>\n\n<|body_start_5|>\n if self.accounts.is_account_active(account):\n self.accounts.delete_account(account)\n self._deleted_accounts.append(account)\n self.summary.record_delete_account(account, name)\n else:\n raise ValueError('Account {} is not active, it cannot be deleted'.format(account))\n<|end_body_5|>\n", "class_docstring": "AgentSession class is a child class of the abstract class \"Session\" It allows the user to perform privileged bank transactions - deposit, withdraw, transfer, create account and delete account It also verfies input looking for constraints and value error (invalid amount)", "class_name": "AgentSession", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AgentSession:\n \"\"\"AgentSession class is a child class of the abstract class \"Session\" It allows the user to perform privileged bank transactions - deposit, withdraw, transfer, create account and delete account It also verfies input looking for constraints and value error (invalid amount)\"\"\"\n\n def deposit(self, account, amount):\n \"\"\"Checks amount and makes deposit to account\"\"\"\n <|body_0|>\n\n def withdraw(self, account, amount):\n \"\"\"Checks amount and withdraws from account\"\"\"\n <|body_1|>\n\n def transfer(self, account1, account2, amount):\n \"\"\"Checks amount and makes transfer\"\"\"\n <|body_2|>\n\n def is_account_new(self, account):\n \"\"\"Check if the account is a new account created in this session\"\"\"\n <|body_3|>\n\n def create_account(self, account, name):\n \"\"\"Creates account\"\"\"\n <|body_4|>\n\n def delete_account(self, account, name):\n \"\"\"Delete account, remove account from active accounts in accounts and add it to the _deleted_accounts list\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 0 <= amount <= 99999999:\n self.summary.record_deposit(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_0|>\n\n<|body_start_1|>\n if 0 <= amount <= 99999999:\n self.summary.record_withdraw(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_1|>\n\n<|body_start_2|>\n if 0 <= amount <= 99999999:\n self.summary.record_transfer(account1, account2, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_2|>\n\n<|body_start_3|>\n if account in self._new_accounts:\n return True\n else:\n return False\n<|end_body_3|>\n\n<|body_start_4|>\n if not (self.accounts.is_account_active(account) or self.is_account_new(account)):\n self._new_accounts.append(int(account))\n self.summary.record_create_account(account, name)\n else:\n raise ValueError('Account {} already exists'.format(account))\n<|end_body_4|>\n\n<|body_start_5|>\n if self.accounts.is_account_active(account):\n self.accounts.delete_account(account)\n self._deleted_accounts.append(account)\n self.summary.record_delete_account(account, name)\n else:\n raise ValueError('Account {} is not active, it cannot be deleted'.format(account))\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000289", "length_bytes": 2247, "license_type": "no_license", "methods": [{"docstring": "Checks amount and makes deposit to account", "name": "deposit", "signature": "def deposit(self, account, amount)"}, {"docstring": "Checks amount and withdraws from account", "name": "withdraw", "signature": "def withdraw(self, account, amount)"}, {"docstring": "Checks amount and makes transfer", "name": "transfer", "signature": "def transfer(self, account1, account2, amount)"}, {"docstring": "Check if the account is a new account created in this session", "name": "is_account_new", "signature": "def is_account_new(self, account)"}, {"docstring": "Creates account", "name": "create_account", "signature": "def create_account(self, account, name)"}, {"docstring": "Delete account, remove account from active accounts in accounts and add it to the _deleted_accounts list", "name": "delete_account", "signature": "def delete_account(self, account, name)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_val_000755", "prompt": "Implement the Python class `AgentSession` described below.\n\nClass description:\nAgentSession class is a child class of the abstract class \"Session\" It allows the user to perform privileged bank transactions - deposit, withdraw, transfer, create account and delete account It also verfies input looking for constraints and value error (invalid amount)\n\nMethod signatures and docstrings:\n- def deposit(self, account, amount): Checks amount and makes deposit to account\n- def withdraw(self, account, amount): Checks amount and withdraws from account\n- def transfer(self, account1, account2, amount): Checks amount and makes transfer\n- def is_account_new(self, account): Check if the account is a new account created in this session\n- def create_account(self, account, name): Creates account\n- def delete_account(self, account, name): Delete account, remove account from active accounts in accounts and add it to the _deleted_accounts list", "prompted_full_text": "Implement the Python class `AgentSession` described below.\n\nClass description:\nAgentSession class is a child class of the abstract class \"Session\" It allows the user to perform privileged bank transactions - deposit, withdraw, transfer, create account and delete account It also verfies input looking for constraints and value error (invalid amount)\n\nMethod signatures and docstrings:\n- def deposit(self, account, amount): Checks amount and makes deposit to account\n- def withdraw(self, account, amount): Checks amount and withdraws from account\n- def transfer(self, account1, account2, amount): Checks amount and makes transfer\n- def is_account_new(self, account): Check if the account is a new account created in this session\n- def create_account(self, account, name): Creates account\n- def delete_account(self, account, name): Delete account, remove account from active accounts in accounts and add it to the _deleted_accounts list\n\n<|skeleton|>\nclass AgentSession:\n \"\"\"AgentSession class is a child class of the abstract class \"Session\" It allows the user to perform privileged bank transactions - deposit, withdraw, transfer, create account and delete account It also verfies input looking for constraints and value error (invalid amount)\"\"\"\n\n def deposit(self, account, amount):\n \"\"\"Checks amount and makes deposit to account\"\"\"\n <|body_0|>\n\n def withdraw(self, account, amount):\n \"\"\"Checks amount and withdraws from account\"\"\"\n <|body_1|>\n\n def transfer(self, account1, account2, amount):\n \"\"\"Checks amount and makes transfer\"\"\"\n <|body_2|>\n\n def is_account_new(self, account):\n \"\"\"Check if the account is a new account created in this session\"\"\"\n <|body_3|>\n\n def create_account(self, account, name):\n \"\"\"Creates account\"\"\"\n <|body_4|>\n\n def delete_account(self, account, name):\n \"\"\"Delete account, remove account from active accounts in accounts and add it to the _deleted_accounts list\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 0 <= amount <= 99999999:\n self.summary.record_deposit(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_0|>\n\n<|body_start_1|>\n if 0 <= amount <= 99999999:\n self.summary.record_withdraw(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_1|>\n\n<|body_start_2|>\n if 0 <= amount <= 99999999:\n self.summary.record_transfer(account1, account2, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n<|end_body_2|>\n\n<|body_start_3|>\n if account in self._new_accounts:\n return True\n else:\n return False\n<|end_body_3|>\n\n<|body_start_4|>\n if not (self.accounts.is_account_active(account) or self.is_account_new(account)):\n self._new_accounts.append(int(account))\n self.summary.record_create_account(account, name)\n else:\n raise ValueError('Account {} already exists'.format(account))\n<|end_body_4|>\n\n<|body_start_5|>\n if self.accounts.is_account_active(account):\n self.accounts.delete_account(account)\n self._deleted_accounts.append(account)\n self.summary.record_delete_account(account, name)\n else:\n raise ValueError('Account {} is not active, it cannot be deleted'.format(account))\n<|end_body_5|>\n", "revision_id": "05d7e84158c162cdaa638d3856e07e6f93288863", "skeleton": "<|skeleton|>\nclass AgentSession:\n \"\"\"AgentSession class is a child class of the abstract class \"Session\" It allows the user to perform privileged bank transactions - deposit, withdraw, transfer, create account and delete account It also verfies input looking for constraints and value error (invalid amount)\"\"\"\n\n def deposit(self, account, amount):\n \"\"\"Checks amount and makes deposit to account\"\"\"\n <|body_0|>\n\n def withdraw(self, account, amount):\n \"\"\"Checks amount and withdraws from account\"\"\"\n <|body_1|>\n\n def transfer(self, account1, account2, amount):\n \"\"\"Checks amount and makes transfer\"\"\"\n <|body_2|>\n\n def is_account_new(self, account):\n \"\"\"Check if the account is a new account created in this session\"\"\"\n <|body_3|>\n\n def create_account(self, account, name):\n \"\"\"Creates account\"\"\"\n <|body_4|>\n\n def delete_account(self, account, name):\n \"\"\"Delete account, remove account from active accounts in accounts and add it to the _deleted_accounts list\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AgentSession:\n \"\"\"AgentSession class is a child class of the abstract class \"Session\" It allows the user to perform privileged bank transactions - deposit, withdraw, transfer, create account and delete account It also verfies input looking for constraints and value error (invalid amount)\"\"\"\n\n def deposit(self, account, amount):\n \"\"\"Checks amount and makes deposit to account\"\"\"\n if 0 <= amount <= 99999999:\n self.summary.record_deposit(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n\n def withdraw(self, account, amount):\n \"\"\"Checks amount and withdraws from account\"\"\"\n if 0 <= amount <= 99999999:\n self.summary.record_withdraw(account, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n\n def transfer(self, account1, account2, amount):\n \"\"\"Checks amount and makes transfer\"\"\"\n if 0 <= amount <= 99999999:\n self.summary.record_transfer(account1, account2, amount)\n else:\n raise ValueError('Invalid transaction value for agent mode')\n\n def is_account_new(self, account):\n \"\"\"Check if the account is a new account created in this session\"\"\"\n if account in self._new_accounts:\n return True\n else:\n return False\n\n def create_account(self, account, name):\n \"\"\"Creates account\"\"\"\n if not (self.accounts.is_account_active(account) or self.is_account_new(account)):\n self._new_accounts.append(int(account))\n self.summary.record_create_account(account, name)\n else:\n raise ValueError('Account {} already exists'.format(account))\n\n def delete_account(self, account, name):\n \"\"\"Delete account, remove account from active accounts in accounts and add it to the _deleted_accounts list\"\"\"\n if self.accounts.is_account_active(account):\n self.accounts.delete_account(account)\n self._deleted_accounts.append(account)\n self.summary.record_delete_account(account, name)\n else:\n raise ValueError('Account {} is not active, it cannot be deleted'.format(account))\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/frontend/agent_session.py", "source_repo": "nine2k/simbank", "split": "test", "star_events_count": 0} {"blob_id": "9d2175d2429760f388d58fc3ea0993979ceb9402", "bodies": ["params = bookings_parser.parse_args()\nquery = Booking.query\nfor _, filter_op in params.items():\n query = filter_op.apply(query, Booking)\nreturn query.all()", "data = api.payload\ndata['initial_date'] = dt.fromisoformat(data['initial_date']).date()\ndata['final_date'] = dt.fromisoformat(data['final_date']).date()\ninitial_date = data['initial_date']\nfinal_date = data['final_date']\npublication_id = data['publication_id']\noverlapped_bookings = Booking.query.filter((Booking.initial_date.between(initial_date, final_date) | Booking.final_date.between(initial_date, final_date)) & (Booking.publication_id == publication_id) & Booking.booking_status.in_([BookingStatus.ACCEPTED.value, BookingStatus.PENDING.value]) & Booking.blockchain_status.in_([BlockChainStatus.CONFIRMED.value, BlockChainStatus.PENDING.value, BlockChainStatus.UNSET.value])).all()\nif len(overlapped_bookings) >= 1:\n return ({'message': 'The intent booking has overlapping dates'}, 412)\nnew_booking = Booking(**data)\ndb.session.add(new_booking)\ndb.session.commit()\nreturn (api.marshal(new_booking, booking_model), 201)"], "bodies_text": "<|body_start_0|>\n params = bookings_parser.parse_args()\n query = Booking.query\n for _, filter_op in params.items():\n query = filter_op.apply(query, Booking)\n return query.all()\n<|end_body_0|>\n\n<|body_start_1|>\n data = api.payload\n data['initial_date'] = dt.fromisoformat(data['initial_date']).date()\n data['final_date'] = dt.fromisoformat(data['final_date']).date()\n initial_date = data['initial_date']\n final_date = data['final_date']\n publication_id = data['publication_id']\n overlapped_bookings = Booking.query.filter((Booking.initial_date.between(initial_date, final_date) | Booking.final_date.between(initial_date, final_date)) & (Booking.publication_id == publication_id) & Booking.booking_status.in_([BookingStatus.ACCEPTED.value, BookingStatus.PENDING.value]) & Booking.blockchain_status.in_([BlockChainStatus.CONFIRMED.value, BlockChainStatus.PENDING.value, BlockChainStatus.UNSET.value])).all()\n if len(overlapped_bookings) >= 1:\n return ({'message': 'The intent booking has overlapping dates'}, 412)\n new_booking = Booking(**data)\n db.session.add(new_booking)\n db.session.commit()\n return (api.marshal(new_booking, booking_model), 201)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BookingListResource", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BookingListResource:\n\n def get(self):\n \"\"\"Get all bookings.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Create a new booking\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n params = bookings_parser.parse_args()\n query = Booking.query\n for _, filter_op in params.items():\n query = filter_op.apply(query, Booking)\n return query.all()\n<|end_body_0|>\n\n<|body_start_1|>\n data = api.payload\n data['initial_date'] = dt.fromisoformat(data['initial_date']).date()\n data['final_date'] = dt.fromisoformat(data['final_date']).date()\n initial_date = data['initial_date']\n final_date = data['final_date']\n publication_id = data['publication_id']\n overlapped_bookings = Booking.query.filter((Booking.initial_date.between(initial_date, final_date) | Booking.final_date.between(initial_date, final_date)) & (Booking.publication_id == publication_id) & Booking.booking_status.in_([BookingStatus.ACCEPTED.value, BookingStatus.PENDING.value]) & Booking.blockchain_status.in_([BlockChainStatus.CONFIRMED.value, BlockChainStatus.PENDING.value, BlockChainStatus.UNSET.value])).all()\n if len(overlapped_bookings) >= 1:\n return ({'message': 'The intent booking has overlapping dates'}, 412)\n new_booking = Booking(**data)\n db.session.add(new_booking)\n db.session.commit()\n return (api.marshal(new_booking, booking_model), 201)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000290", "length_bytes": 8200, "license_type": "permissive", "methods": [{"docstring": "Get all bookings.", "name": "get", "signature": "def get(self)"}, {"docstring": "Create a new booking", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046438", "prompt": "Implement the Python class `BookingListResource` described below.\n\nClass description:\nImplement the BookingListResource class.\n\nMethod signatures and docstrings:\n- def get(self): Get all bookings.\n- def post(self): Create a new booking", "prompted_full_text": "Implement the Python class `BookingListResource` described below.\n\nClass description:\nImplement the BookingListResource class.\n\nMethod signatures and docstrings:\n- def get(self): Get all bookings.\n- def post(self): Create a new booking\n\n<|skeleton|>\nclass BookingListResource:\n\n def get(self):\n \"\"\"Get all bookings.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Create a new booking\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n params = bookings_parser.parse_args()\n query = Booking.query\n for _, filter_op in params.items():\n query = filter_op.apply(query, Booking)\n return query.all()\n<|end_body_0|>\n\n<|body_start_1|>\n data = api.payload\n data['initial_date'] = dt.fromisoformat(data['initial_date']).date()\n data['final_date'] = dt.fromisoformat(data['final_date']).date()\n initial_date = data['initial_date']\n final_date = data['final_date']\n publication_id = data['publication_id']\n overlapped_bookings = Booking.query.filter((Booking.initial_date.between(initial_date, final_date) | Booking.final_date.between(initial_date, final_date)) & (Booking.publication_id == publication_id) & Booking.booking_status.in_([BookingStatus.ACCEPTED.value, BookingStatus.PENDING.value]) & Booking.blockchain_status.in_([BlockChainStatus.CONFIRMED.value, BlockChainStatus.PENDING.value, BlockChainStatus.UNSET.value])).all()\n if len(overlapped_bookings) >= 1:\n return ({'message': 'The intent booking has overlapping dates'}, 412)\n new_booking = Booking(**data)\n db.session.add(new_booking)\n db.session.commit()\n return (api.marshal(new_booking, booking_model), 201)\n<|end_body_1|>\n", "revision_id": "92fd3c8c5e4c8462aa0e7f00e50f3c60680ab161", "skeleton": "<|skeleton|>\nclass BookingListResource:\n\n def get(self):\n \"\"\"Get all bookings.\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Create a new booking\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BookingListResource:\n def get(self):\n \"\"\"Get all bookings.\"\"\"\n params = bookings_parser.parse_args()\n query = Booking.query\n for _, filter_op in params.items():\n query = filter_op.apply(query, Booking)\n return query.all()\n\n def post(self):\n \"\"\"Create a new booking\"\"\"\n data = api.payload\n data['initial_date'] = dt.fromisoformat(data['initial_date']).date()\n data['final_date'] = dt.fromisoformat(data['final_date']).date()\n initial_date = data['initial_date']\n final_date = data['final_date']\n publication_id = data['publication_id']\n overlapped_bookings = Booking.query.filter((Booking.initial_date.between(initial_date, final_date) | Booking.final_date.between(initial_date, final_date)) & (Booking.publication_id == publication_id) & Booking.booking_status.in_([BookingStatus.ACCEPTED.value, BookingStatus.PENDING.value]) & Booking.blockchain_status.in_([BlockChainStatus.CONFIRMED.value, BlockChainStatus.PENDING.value, BlockChainStatus.UNSET.value])).all()\n if len(overlapped_bookings) >= 1:\n return ({'message': 'The intent booking has overlapping dates'}, 412)\n new_booking = Booking(**data)\n db.session.add(new_booking)\n db.session.commit()\n return (api.marshal(new_booking, booking_model), 201)\n", "source": "the_stack_v2_python_sparse", "source_path": "booking_microservice/namespaces/bookings.py", "source_repo": "7552-2020C2-grupo5/bookings-microservice", "split": "test", "star_events_count": 0} {"blob_id": "17a62e601443c5ef8b4a70f41f263457704ff5a3", "bodies": ["length = len(nums)\nzero = [0] * length\ncur = 0\nfor index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n zero[index] = cur\nfor index, num in enumerate(nums):\n if num != 0:\n num[index - zero[index]] = num\nfor i in range(cur):\n nums[length - i] = 0", "length = len(nums)\ncur = 0\nfor index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n nums[index - cur] = num\nfor i in range(cur):\n nums[length - i - 1] = 0"], "bodies_text": "<|body_start_0|>\n length = len(nums)\n zero = [0] * length\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n zero[index] = cur\n for index, num in enumerate(nums):\n if num != 0:\n num[index - zero[index]] = num\n for i in range(cur):\n nums[length - i] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(nums)\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n nums[index - cur] = num\n for i in range(cur):\n nums[length - i - 1] = 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def moveZeroes1(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(nums)\n zero = [0] * length\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n zero[index] = cur\n for index, num in enumerate(nums):\n if num != 0:\n num[index - zero[index]] = num\n for i in range(cur):\n nums[length - i] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(nums)\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n nums[index - cur] = num\n for i in range(cur):\n nums[length - i - 1] = 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000291", "length_bytes": 1110, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.", "name": "moveZeroes1", "signature": "def moveZeroes1(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.", "name": "moveZeroes", "signature": "def moveZeroes(self, nums)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_001734", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes1(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\n- def moveZeroes(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def moveZeroes1(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\n- def moveZeroes(self, nums): :type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\n\n<|skeleton|>\nclass Solution:\n\n def moveZeroes1(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n length = len(nums)\n zero = [0] * length\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n zero[index] = cur\n for index, num in enumerate(nums):\n if num != 0:\n num[index - zero[index]] = num\n for i in range(cur):\n nums[length - i] = 0\n<|end_body_0|>\n\n<|body_start_1|>\n length = len(nums)\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n nums[index - cur] = num\n for i in range(cur):\n nums[length - i - 1] = 0\n<|end_body_1|>\n", "revision_id": "70bdd75b6af2e1811c1beab22050c01d28d7373e", "skeleton": "<|skeleton|>\nclass Solution:\n\n def moveZeroes1(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_0|>\n\n def moveZeroes(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def moveZeroes1(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n length = len(nums)\n zero = [0] * length\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n zero[index] = cur\n for index, num in enumerate(nums):\n if num != 0:\n num[index - zero[index]] = num\n for i in range(cur):\n nums[length - i] = 0\n\n def moveZeroes(self, nums):\n \"\"\":type nums: List[int] :rtype: void Do not return anything, modify nums in-place instead.\"\"\"\n length = len(nums)\n cur = 0\n for index, num in enumerate(nums):\n if num == 0:\n cur += 1\n else:\n nums[index - cur] = num\n for i in range(cur):\n nums[length - i - 1] = 0\n", "source": "the_stack_v2_python_sparse", "source_path": "python/leetcode_bak/283_Move_Zeroes.py", "source_repo": "bobcaoge/my-code", "split": "test", "star_events_count": 0} {"blob_id": "3c8fabed31437613d3508d3388c1f85b89a53303", "bodies": ["self.data_path = data_path\nself.metadata = metadata\nself.support = support\nself.target_image_l = target_image\nself.target_image = set(target_image)\nself.validation_eqs_l = validation_eqs\nself.validation_eqs = set(validation_eqs)\nself.res = Manager().dict()", "eq = load_eq(self.data_path, idx, self.metadata.eqs_per_hdf)\ndict_costs = return_dict_metadata_dummy_constant(self.metadata)\nconsts = torch.stack([torch.ones([int(self.support.shape[1])]) * dict_costs[key] for key in dict_costs.keys()])\ninput_lambdi = torch.cat([self.support, consts], axis=0)\nassert input_lambdi.shape[0] == len(self.metadata.total_coefficients) + len(self.metadata.total_variables)\nconst, dummy_const = sample_symbolic_constants(eq)\neq_str = sympify(eq.expr.format(**dummy_const))\nif str(eq_str) in self.validation_eqs:\n print('EQUATION IN VAL')\nargs = [eq.code, input_lambdi]\ny = evaluate_fun(args)\ncurr = [x if not np.isnan(x) else 'nan' for x in y]\nval = tuple(curr)\nif val in self.target_image:\n index = self.target_image_l.index(val)\n print('EQUATION IN VAL')\n if not index in self.res:\n self.res[index] = self.validation_eqs_l[index]\n print(len(self.res))"], "bodies_text": "<|body_start_0|>\n self.data_path = data_path\n self.metadata = metadata\n self.support = support\n self.target_image_l = target_image\n self.target_image = set(target_image)\n self.validation_eqs_l = validation_eqs\n self.validation_eqs = set(validation_eqs)\n self.res = Manager().dict()\n<|end_body_0|>\n\n<|body_start_1|>\n eq = load_eq(self.data_path, idx, self.metadata.eqs_per_hdf)\n dict_costs = return_dict_metadata_dummy_constant(self.metadata)\n consts = torch.stack([torch.ones([int(self.support.shape[1])]) * dict_costs[key] for key in dict_costs.keys()])\n input_lambdi = torch.cat([self.support, consts], axis=0)\n assert input_lambdi.shape[0] == len(self.metadata.total_coefficients) + len(self.metadata.total_variables)\n const, dummy_const = sample_symbolic_constants(eq)\n eq_str = sympify(eq.expr.format(**dummy_const))\n if str(eq_str) in self.validation_eqs:\n print('EQUATION IN VAL')\n args = [eq.code, input_lambdi]\n y = evaluate_fun(args)\n curr = [x if not np.isnan(x) else 'nan' for x in y]\n val = tuple(curr)\n if val in self.target_image:\n index = self.target_image_l.index(val)\n print('EQUATION IN VAL')\n if not index in self.res:\n self.res[index] = self.validation_eqs_l[index]\n print(len(self.res))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Pipeline", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Pipeline:\n\n def __init__(self, data_path, metadata, support, target_image: list, validation_eqs: set):\n \"\"\"Args: param1: param2: support: target_image: target_image_l: validation_eqs: A set containing all the validation equations in a str format and without constant placeholders. This argument is used for the symbol checking\"\"\"\n <|body_0|>\n\n def is_valid_and_not_in_validation_set(self, idx: int) -> bool:\n \"\"\"Assert both symbolically and numerically that the equation is not in the validation set Args: idx: index to the Eq in the dataset\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data_path = data_path\n self.metadata = metadata\n self.support = support\n self.target_image_l = target_image\n self.target_image = set(target_image)\n self.validation_eqs_l = validation_eqs\n self.validation_eqs = set(validation_eqs)\n self.res = Manager().dict()\n<|end_body_0|>\n\n<|body_start_1|>\n eq = load_eq(self.data_path, idx, self.metadata.eqs_per_hdf)\n dict_costs = return_dict_metadata_dummy_constant(self.metadata)\n consts = torch.stack([torch.ones([int(self.support.shape[1])]) * dict_costs[key] for key in dict_costs.keys()])\n input_lambdi = torch.cat([self.support, consts], axis=0)\n assert input_lambdi.shape[0] == len(self.metadata.total_coefficients) + len(self.metadata.total_variables)\n const, dummy_const = sample_symbolic_constants(eq)\n eq_str = sympify(eq.expr.format(**dummy_const))\n if str(eq_str) in self.validation_eqs:\n print('EQUATION IN VAL')\n args = [eq.code, input_lambdi]\n y = evaluate_fun(args)\n curr = [x if not np.isnan(x) else 'nan' for x in y]\n val = tuple(curr)\n if val in self.target_image:\n index = self.target_image_l.index(val)\n print('EQUATION IN VAL')\n if not index in self.res:\n self.res[index] = self.validation_eqs_l[index]\n print(len(self.res))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000292", "length_bytes": 5094, "license_type": "permissive", "methods": [{"docstring": "Args: param1: param2: support: target_image: target_image_l: validation_eqs: A set containing all the validation equations in a str format and without constant placeholders. This argument is used for the symbol checking", "name": "__init__", "signature": "def __init__(self, data_path, metadata, support, target_image: list, validation_eqs: set)"}, {"docstring": "Assert both symbolically and numerically that the equation is not in the validation set Args: idx: index to the Eq in the dataset", "name": "is_valid_and_not_in_validation_set", "signature": "def is_valid_and_not_in_validation_set(self, idx: int) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012704", "prompt": "Implement the Python class `Pipeline` described below.\n\nClass description:\nImplement the Pipeline class.\n\nMethod signatures and docstrings:\n- def __init__(self, data_path, metadata, support, target_image: list, validation_eqs: set): Args: param1: param2: support: target_image: target_image_l: validation_eqs: A set containing all the validation equations in a str format and without constant placeholders. This argument is used for the symbol checking\n- def is_valid_and_not_in_validation_set(self, idx: int) -> bool: Assert both symbolically and numerically that the equation is not in the validation set Args: idx: index to the Eq in the dataset", "prompted_full_text": "Implement the Python class `Pipeline` described below.\n\nClass description:\nImplement the Pipeline class.\n\nMethod signatures and docstrings:\n- def __init__(self, data_path, metadata, support, target_image: list, validation_eqs: set): Args: param1: param2: support: target_image: target_image_l: validation_eqs: A set containing all the validation equations in a str format and without constant placeholders. This argument is used for the symbol checking\n- def is_valid_and_not_in_validation_set(self, idx: int) -> bool: Assert both symbolically and numerically that the equation is not in the validation set Args: idx: index to the Eq in the dataset\n\n<|skeleton|>\nclass Pipeline:\n\n def __init__(self, data_path, metadata, support, target_image: list, validation_eqs: set):\n \"\"\"Args: param1: param2: support: target_image: target_image_l: validation_eqs: A set containing all the validation equations in a str format and without constant placeholders. This argument is used for the symbol checking\"\"\"\n <|body_0|>\n\n def is_valid_and_not_in_validation_set(self, idx: int) -> bool:\n \"\"\"Assert both symbolically and numerically that the equation is not in the validation set Args: idx: index to the Eq in the dataset\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.data_path = data_path\n self.metadata = metadata\n self.support = support\n self.target_image_l = target_image\n self.target_image = set(target_image)\n self.validation_eqs_l = validation_eqs\n self.validation_eqs = set(validation_eqs)\n self.res = Manager().dict()\n<|end_body_0|>\n\n<|body_start_1|>\n eq = load_eq(self.data_path, idx, self.metadata.eqs_per_hdf)\n dict_costs = return_dict_metadata_dummy_constant(self.metadata)\n consts = torch.stack([torch.ones([int(self.support.shape[1])]) * dict_costs[key] for key in dict_costs.keys()])\n input_lambdi = torch.cat([self.support, consts], axis=0)\n assert input_lambdi.shape[0] == len(self.metadata.total_coefficients) + len(self.metadata.total_variables)\n const, dummy_const = sample_symbolic_constants(eq)\n eq_str = sympify(eq.expr.format(**dummy_const))\n if str(eq_str) in self.validation_eqs:\n print('EQUATION IN VAL')\n args = [eq.code, input_lambdi]\n y = evaluate_fun(args)\n curr = [x if not np.isnan(x) else 'nan' for x in y]\n val = tuple(curr)\n if val in self.target_image:\n index = self.target_image_l.index(val)\n print('EQUATION IN VAL')\n if not index in self.res:\n self.res[index] = self.validation_eqs_l[index]\n print(len(self.res))\n<|end_body_1|>\n", "revision_id": "e5d83785cf15fee2bf728ccef3a46c5a90cdd8b9", "skeleton": "<|skeleton|>\nclass Pipeline:\n\n def __init__(self, data_path, metadata, support, target_image: list, validation_eqs: set):\n \"\"\"Args: param1: param2: support: target_image: target_image_l: validation_eqs: A set containing all the validation equations in a str format and without constant placeholders. This argument is used for the symbol checking\"\"\"\n <|body_0|>\n\n def is_valid_and_not_in_validation_set(self, idx: int) -> bool:\n \"\"\"Assert both symbolically and numerically that the equation is not in the validation set Args: idx: index to the Eq in the dataset\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Pipeline:\n def __init__(self, data_path, metadata, support, target_image: list, validation_eqs: set):\n \"\"\"Args: param1: param2: support: target_image: target_image_l: validation_eqs: A set containing all the validation equations in a str format and without constant placeholders. This argument is used for the symbol checking\"\"\"\n self.data_path = data_path\n self.metadata = metadata\n self.support = support\n self.target_image_l = target_image\n self.target_image = set(target_image)\n self.validation_eqs_l = validation_eqs\n self.validation_eqs = set(validation_eqs)\n self.res = Manager().dict()\n\n def is_valid_and_not_in_validation_set(self, idx: int) -> bool:\n \"\"\"Assert both symbolically and numerically that the equation is not in the validation set Args: idx: index to the Eq in the dataset\"\"\"\n eq = load_eq(self.data_path, idx, self.metadata.eqs_per_hdf)\n dict_costs = return_dict_metadata_dummy_constant(self.metadata)\n consts = torch.stack([torch.ones([int(self.support.shape[1])]) * dict_costs[key] for key in dict_costs.keys()])\n input_lambdi = torch.cat([self.support, consts], axis=0)\n assert input_lambdi.shape[0] == len(self.metadata.total_coefficients) + len(self.metadata.total_variables)\n const, dummy_const = sample_symbolic_constants(eq)\n eq_str = sympify(eq.expr.format(**dummy_const))\n if str(eq_str) in self.validation_eqs:\n print('EQUATION IN VAL')\n args = [eq.code, input_lambdi]\n y = evaluate_fun(args)\n curr = [x if not np.isnan(x) else 'nan' for x in y]\n val = tuple(curr)\n if val in self.target_image:\n index = self.target_image_l.index(val)\n print('EQUATION IN VAL')\n if not index in self.res:\n self.res[index] = self.validation_eqs_l[index]\n print(len(self.res))\n", "source": "the_stack_v2_python_sparse", "source_path": "scripts/data_creation/test_presence.py", "source_repo": "ZhaozhiQIAN/NeuralSymbolicRegressionThatScales", "split": "test", "star_events_count": 0} {"blob_id": "e66d8e24bf341965edfcaa2b53ed96840ee39fbc", "bodies": ["startTime = datetime.datetime.now()\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\ncrime = repo['yuxiao_yzhang11.crime']\ngroup = {'_id': '$Location', 'count': {'$sum': 1}}\ncrimeCor = crime.aggregate([{'$group': group}])\nrepo.dropCollection('crimeCor')\nrepo.createCollection('crimeCor')\nrepo['yuxiao_yzhang11.crimeCor'].insert_many(crimeCor)\nrepo.dropCollection('crimeZip')\nrepo.createCollection('crimeZip')\ncrime_zips = []\nfor i in crimeCor:\n cor = i['_id'].replace('(', '')\n cor = cor.replace(')', '')\n if cor != '0.0, 0.0':\n geolocator = Nominatim()\n location = geolocator.reverse(cor, timeout=10)\n try:\n zip = location.raw['address']['postcode']\n i['zip'] = zip\n print(i)\n crime_zips.append(i)\n repo['yuxiao_yzhang11.crimeZip'].insert(i)\n except KeyError as e:\n print('Error: ', e, ', at cord: ', cor, ', and zip: ', zip, ', and location: ', location)\nendTime = datetime.datetime.now()\nreturn {'start': startTime, 'end': endTime}", "client = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\ndoc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\ndoc.add_namespace('dat', 'http://datamechanics.io/data/')\ndoc.add_namespace('ont', 'http://datamechanics.io/ontology#')\ndoc.add_namespace('log', 'http://datamechanics.io/log/')\nthis_script = doc.agent('alg:yuxiao_yzhang11#getCrimeZip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\nresource = doc.entity('dat:20127to20158crimeincident1edit', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\nthis_run = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\ndoc.usage(this_run, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval'})\noutput = doc.entity('dat:yuxiao_yzhang11.crimeZip', {prov.model.PROV_LABEL: 'CrimeZip', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAssociatedWith(this_run, this_script)\ndoc.used(this_run, resource, startTime)\ndoc.wasAttributedTo(output, this_script)\ndoc.wasGeneratedBy(output, this_run, endTime)\ndoc.wasDerivedFrom(output, resource, this_run, this_run, this_run)\nrepo.logout()\nreturn doc"], "bodies_text": "<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n crime = repo['yuxiao_yzhang11.crime']\n group = {'_id': '$Location', 'count': {'$sum': 1}}\n crimeCor = crime.aggregate([{'$group': group}])\n repo.dropCollection('crimeCor')\n repo.createCollection('crimeCor')\n repo['yuxiao_yzhang11.crimeCor'].insert_many(crimeCor)\n repo.dropCollection('crimeZip')\n repo.createCollection('crimeZip')\n crime_zips = []\n for i in crimeCor:\n cor = i['_id'].replace('(', '')\n cor = cor.replace(')', '')\n if cor != '0.0, 0.0':\n geolocator = Nominatim()\n location = geolocator.reverse(cor, timeout=10)\n try:\n zip = location.raw['address']['postcode']\n i['zip'] = zip\n print(i)\n crime_zips.append(i)\n repo['yuxiao_yzhang11.crimeZip'].insert(i)\n except KeyError as e:\n print('Error: ', e, ', at cord: ', cor, ', and zip: ', zip, ', and location: ', location)\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n this_script = doc.agent('alg:yuxiao_yzhang11#getCrimeZip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('dat:20127to20158crimeincident1edit', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n this_run = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.usage(this_run, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval'})\n output = doc.entity('dat:yuxiao_yzhang11.crimeZip', {prov.model.PROV_LABEL: 'CrimeZip', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAssociatedWith(this_run, this_script)\n doc.used(this_run, resource, startTime)\n doc.wasAttributedTo(output, this_script)\n doc.wasGeneratedBy(output, this_run, endTime)\n doc.wasDerivedFrom(output, resource, this_run, this_run, this_run)\n repo.logout()\n return doc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "getCrimeZip", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass getCrimeZip:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n crime = repo['yuxiao_yzhang11.crime']\n group = {'_id': '$Location', 'count': {'$sum': 1}}\n crimeCor = crime.aggregate([{'$group': group}])\n repo.dropCollection('crimeCor')\n repo.createCollection('crimeCor')\n repo['yuxiao_yzhang11.crimeCor'].insert_many(crimeCor)\n repo.dropCollection('crimeZip')\n repo.createCollection('crimeZip')\n crime_zips = []\n for i in crimeCor:\n cor = i['_id'].replace('(', '')\n cor = cor.replace(')', '')\n if cor != '0.0, 0.0':\n geolocator = Nominatim()\n location = geolocator.reverse(cor, timeout=10)\n try:\n zip = location.raw['address']['postcode']\n i['zip'] = zip\n print(i)\n crime_zips.append(i)\n repo['yuxiao_yzhang11.crimeZip'].insert(i)\n except KeyError as e:\n print('Error: ', e, ', at cord: ', cor, ', and zip: ', zip, ', and location: ', location)\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n this_script = doc.agent('alg:yuxiao_yzhang11#getCrimeZip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('dat:20127to20158crimeincident1edit', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n this_run = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.usage(this_run, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval'})\n output = doc.entity('dat:yuxiao_yzhang11.crimeZip', {prov.model.PROV_LABEL: 'CrimeZip', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAssociatedWith(this_run, this_script)\n doc.used(this_run, resource, startTime)\n doc.wasAttributedTo(output, this_script)\n doc.wasGeneratedBy(output, this_run, endTime)\n doc.wasDerivedFrom(output, resource, this_run, this_run, this_run)\n repo.logout()\n return doc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000293", "length_bytes": 4548, "license_type": "no_license", "methods": [{"docstring": "Retrieve some data sets (not using the API here for the sake of simplicity).", "name": "execute", "signature": "def execute(trial=False)"}, {"docstring": "Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "name": "provenance", "signature": "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012363", "prompt": "Implement the Python class `getCrimeZip` described below.\n\nClass description:\nImplement the getCrimeZip class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "prompted_full_text": "Implement the Python class `getCrimeZip` described below.\n\nClass description:\nImplement the getCrimeZip class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets (not using the API here for the sake of simplicity).\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\n\n<|skeleton|>\nclass getCrimeZip:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n crime = repo['yuxiao_yzhang11.crime']\n group = {'_id': '$Location', 'count': {'$sum': 1}}\n crimeCor = crime.aggregate([{'$group': group}])\n repo.dropCollection('crimeCor')\n repo.createCollection('crimeCor')\n repo['yuxiao_yzhang11.crimeCor'].insert_many(crimeCor)\n repo.dropCollection('crimeZip')\n repo.createCollection('crimeZip')\n crime_zips = []\n for i in crimeCor:\n cor = i['_id'].replace('(', '')\n cor = cor.replace(')', '')\n if cor != '0.0, 0.0':\n geolocator = Nominatim()\n location = geolocator.reverse(cor, timeout=10)\n try:\n zip = location.raw['address']['postcode']\n i['zip'] = zip\n print(i)\n crime_zips.append(i)\n repo['yuxiao_yzhang11.crimeZip'].insert(i)\n except KeyError as e:\n print('Error: ', e, ', at cord: ', cor, ', and zip: ', zip, ', and location: ', location)\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n this_script = doc.agent('alg:yuxiao_yzhang11#getCrimeZip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('dat:20127to20158crimeincident1edit', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n this_run = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.usage(this_run, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval'})\n output = doc.entity('dat:yuxiao_yzhang11.crimeZip', {prov.model.PROV_LABEL: 'CrimeZip', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAssociatedWith(this_run, this_script)\n doc.used(this_run, resource, startTime)\n doc.wasAttributedTo(output, this_script)\n doc.wasGeneratedBy(output, this_run, endTime)\n doc.wasDerivedFrom(output, resource, this_run, this_run, this_run)\n repo.logout()\n return doc\n<|end_body_1|>\n", "revision_id": "b5ccaad97f6e35f9580e645ca764f36eb3406f43", "skeleton": "<|skeleton|>\nclass getCrimeZip:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class getCrimeZip:\n def execute(trial=False):\n \"\"\"Retrieve some data sets (not using the API here for the sake of simplicity).\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n crime = repo['yuxiao_yzhang11.crime']\n group = {'_id': '$Location', 'count': {'$sum': 1}}\n crimeCor = crime.aggregate([{'$group': group}])\n repo.dropCollection('crimeCor')\n repo.createCollection('crimeCor')\n repo['yuxiao_yzhang11.crimeCor'].insert_many(crimeCor)\n repo.dropCollection('crimeZip')\n repo.createCollection('crimeZip')\n crime_zips = []\n for i in crimeCor:\n cor = i['_id'].replace('(', '')\n cor = cor.replace(')', '')\n if cor != '0.0, 0.0':\n geolocator = Nominatim()\n location = geolocator.reverse(cor, timeout=10)\n try:\n zip = location.raw['address']['postcode']\n i['zip'] = zip\n print(i)\n crime_zips.append(i)\n repo['yuxiao_yzhang11.crimeZip'].insert(i)\n except KeyError as e:\n print('Error: ', e, ', at cord: ', cor, ', and zip: ', zip, ', and location: ', location)\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('yuxiao_yzhang11', 'yuxiao_yzhang11')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n this_script = doc.agent('alg:yuxiao_yzhang11#getCrimeZip', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n resource = doc.entity('dat:20127to20158crimeincident1edit', {'prov:label': '311, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n this_run = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.usage(this_run, resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval'})\n output = doc.entity('dat:yuxiao_yzhang11.crimeZip', {prov.model.PROV_LABEL: 'CrimeZip', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAssociatedWith(this_run, this_script)\n doc.used(this_run, resource, startTime)\n doc.wasAttributedTo(output, this_script)\n doc.wasGeneratedBy(output, this_run, endTime)\n doc.wasDerivedFrom(output, resource, this_run, this_run, this_run)\n repo.logout()\n return doc\n", "source": "the_stack_v2_python_sparse", "source_path": "yuxiao_yzhang11/getCrimeZip.py", "source_repo": "dwang1995/course-2018-spr-proj", "split": "test", "star_events_count": 1} {"blob_id": "34c66274cdb233c734e8edf1826fa119e8bd66b8", "bodies": ["daepath = geo_cfg['room']\nself.planes = []\nmesh = co.Collada(daepath)\nfor obj in mesh.scene.objects('geometry'):\n for triset in obj.primitives():\n if type(triset) != co.triangleset.BoundTriangleSet:\n log.info('Warning: non-supported primitive ignored!')\n continue\n for jp, tri in enumerate(triset):\n name = '{}-{}'.format(obj.original.name, jp)\n vertices = np.array(tri.vertices)\n normal = np.float32(tri.normals[0] / np.linalg.norm(tri.normals[0]))\n vert_x, vert_y, normal_nig = vert_2d(normal, vertices)\n area = np.float64(triangle_area(vertices))\n centroid = np.float32(triangle_centroid(vertices))\n alpha_v = np.float32(alpha[jp])\n plane = ra_cpp.Planecpp(name, False, vertices, normal, vert_x, vert_y, normal_nig, area, centroid, alpha_v, s[jp])\n self.planes.append(plane)\nself.total_area = total_area(self.planes)\nself.volume = volume(self.planes)", "fig = plt.figure()\nax = fig.gca(projection='3d')\nfor plane in self.planes:\n ax.scatter(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2], color='blue')\n verts = [list(zip(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2]))]\n collection = Poly3DCollection(verts, linewidths=1, alpha=0.5, edgecolor='gray')\n face_color = 'silver'\n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n if normals == 'on':\n ax.quiver(plane.centroid[0], plane.centroid[1], plane.centroid[2], plane.normal[0], plane.normal[1], plane.normal[2], length=1, color='red', normalize=True)\nax.set_xlabel('X axis')\nax.set_ylabel('Y axis')\nax.set_zlabel('Z axis')\nplt.show()"], "bodies_text": "<|body_start_0|>\n daepath = geo_cfg['room']\n self.planes = []\n mesh = co.Collada(daepath)\n for obj in mesh.scene.objects('geometry'):\n for triset in obj.primitives():\n if type(triset) != co.triangleset.BoundTriangleSet:\n log.info('Warning: non-supported primitive ignored!')\n continue\n for jp, tri in enumerate(triset):\n name = '{}-{}'.format(obj.original.name, jp)\n vertices = np.array(tri.vertices)\n normal = np.float32(tri.normals[0] / np.linalg.norm(tri.normals[0]))\n vert_x, vert_y, normal_nig = vert_2d(normal, vertices)\n area = np.float64(triangle_area(vertices))\n centroid = np.float32(triangle_centroid(vertices))\n alpha_v = np.float32(alpha[jp])\n plane = ra_cpp.Planecpp(name, False, vertices, normal, vert_x, vert_y, normal_nig, area, centroid, alpha_v, s[jp])\n self.planes.append(plane)\n self.total_area = total_area(self.planes)\n self.volume = volume(self.planes)\n<|end_body_0|>\n\n<|body_start_1|>\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n for plane in self.planes:\n ax.scatter(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2], color='blue')\n verts = [list(zip(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2]))]\n collection = Poly3DCollection(verts, linewidths=1, alpha=0.5, edgecolor='gray')\n face_color = 'silver'\n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n if normals == 'on':\n ax.quiver(plane.centroid[0], plane.centroid[1], plane.centroid[2], plane.normal[0], plane.normal[1], plane.normal[2], length=1, color='red', normalize=True)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n plt.show()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Geometry", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Geometry:\n\n def __init__(self, geo_cfg, alpha, s):\n \"\"\"Set up the room geometry from the .dae file Geometry consists of: Volume, Total ara and an array of plane objects. Each plane object will be processed in a c++ class and have the following att: - name (string) - bounding box (bool) - list of vertices (Eigen - Nvert x 3) - normal (Eigen - 1 x 3) - vert_x - 2D polygon x coord (Eigen - 1 x Nvert) - vert_y - 2D polygon y coord (Eigen - 1 x Nvert) - nig - 2D normal components index (Eigen - 1 x 2) - area (double) - centroid (Eigen - 1 x 3) - alpha - absorption coefficient (Eigen - 1 x Nfreq) - s - scattering coefficient (double)\"\"\"\n <|body_0|>\n\n def plot_dae_room(self, normals='off'):\n \"\"\"a simple plot of the room - not redered\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n daepath = geo_cfg['room']\n self.planes = []\n mesh = co.Collada(daepath)\n for obj in mesh.scene.objects('geometry'):\n for triset in obj.primitives():\n if type(triset) != co.triangleset.BoundTriangleSet:\n log.info('Warning: non-supported primitive ignored!')\n continue\n for jp, tri in enumerate(triset):\n name = '{}-{}'.format(obj.original.name, jp)\n vertices = np.array(tri.vertices)\n normal = np.float32(tri.normals[0] / np.linalg.norm(tri.normals[0]))\n vert_x, vert_y, normal_nig = vert_2d(normal, vertices)\n area = np.float64(triangle_area(vertices))\n centroid = np.float32(triangle_centroid(vertices))\n alpha_v = np.float32(alpha[jp])\n plane = ra_cpp.Planecpp(name, False, vertices, normal, vert_x, vert_y, normal_nig, area, centroid, alpha_v, s[jp])\n self.planes.append(plane)\n self.total_area = total_area(self.planes)\n self.volume = volume(self.planes)\n<|end_body_0|>\n\n<|body_start_1|>\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n for plane in self.planes:\n ax.scatter(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2], color='blue')\n verts = [list(zip(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2]))]\n collection = Poly3DCollection(verts, linewidths=1, alpha=0.5, edgecolor='gray')\n face_color = 'silver'\n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n if normals == 'on':\n ax.quiver(plane.centroid[0], plane.centroid[1], plane.centroid[2], plane.normal[0], plane.normal[1], plane.normal[2], length=1, color='red', normalize=True)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n plt.show()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000294", "length_bytes": 18836, "license_type": "no_license", "methods": [{"docstring": "Set up the room geometry from the .dae file Geometry consists of: Volume, Total ara and an array of plane objects. Each plane object will be processed in a c++ class and have the following att: - name (string) - bounding box (bool) - list of vertices (Eigen - Nvert x 3) - normal (Eigen - 1 x 3) - vert_x - 2D polygon x coord (Eigen - 1 x Nvert) - vert_y - 2D polygon y coord (Eigen - 1 x Nvert) - nig - 2D normal components index (Eigen - 1 x 2) - area (double) - centroid (Eigen - 1 x 3) - alpha - absorption coefficient (Eigen - 1 x Nfreq) - s - scattering coefficient (double)", "name": "__init__", "signature": "def __init__(self, geo_cfg, alpha, s)"}, {"docstring": "a simple plot of the room - not redered", "name": "plot_dae_room", "signature": "def plot_dae_room(self, normals='off')"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_024957", "prompt": "Implement the Python class `Geometry` described below.\n\nClass description:\nImplement the Geometry class.\n\nMethod signatures and docstrings:\n- def __init__(self, geo_cfg, alpha, s): Set up the room geometry from the .dae file Geometry consists of: Volume, Total ara and an array of plane objects. Each plane object will be processed in a c++ class and have the following att: - name (string) - bounding box (bool) - list of vertices (Eigen - Nvert x 3) - normal (Eigen - 1 x 3) - vert_x - 2D polygon x coord (Eigen - 1 x Nvert) - vert_y - 2D polygon y coord (Eigen - 1 x Nvert) - nig - 2D normal components index (Eigen - 1 x 2) - area (double) - centroid (Eigen - 1 x 3) - alpha - absorption coefficient (Eigen - 1 x Nfreq) - s - scattering coefficient (double)\n- def plot_dae_room(self, normals='off'): a simple plot of the room - not redered", "prompted_full_text": "Implement the Python class `Geometry` described below.\n\nClass description:\nImplement the Geometry class.\n\nMethod signatures and docstrings:\n- def __init__(self, geo_cfg, alpha, s): Set up the room geometry from the .dae file Geometry consists of: Volume, Total ara and an array of plane objects. Each plane object will be processed in a c++ class and have the following att: - name (string) - bounding box (bool) - list of vertices (Eigen - Nvert x 3) - normal (Eigen - 1 x 3) - vert_x - 2D polygon x coord (Eigen - 1 x Nvert) - vert_y - 2D polygon y coord (Eigen - 1 x Nvert) - nig - 2D normal components index (Eigen - 1 x 2) - area (double) - centroid (Eigen - 1 x 3) - alpha - absorption coefficient (Eigen - 1 x Nfreq) - s - scattering coefficient (double)\n- def plot_dae_room(self, normals='off'): a simple plot of the room - not redered\n\n<|skeleton|>\nclass Geometry:\n\n def __init__(self, geo_cfg, alpha, s):\n \"\"\"Set up the room geometry from the .dae file Geometry consists of: Volume, Total ara and an array of plane objects. Each plane object will be processed in a c++ class and have the following att: - name (string) - bounding box (bool) - list of vertices (Eigen - Nvert x 3) - normal (Eigen - 1 x 3) - vert_x - 2D polygon x coord (Eigen - 1 x Nvert) - vert_y - 2D polygon y coord (Eigen - 1 x Nvert) - nig - 2D normal components index (Eigen - 1 x 2) - area (double) - centroid (Eigen - 1 x 3) - alpha - absorption coefficient (Eigen - 1 x Nfreq) - s - scattering coefficient (double)\"\"\"\n <|body_0|>\n\n def plot_dae_room(self, normals='off'):\n \"\"\"a simple plot of the room - not redered\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n daepath = geo_cfg['room']\n self.planes = []\n mesh = co.Collada(daepath)\n for obj in mesh.scene.objects('geometry'):\n for triset in obj.primitives():\n if type(triset) != co.triangleset.BoundTriangleSet:\n log.info('Warning: non-supported primitive ignored!')\n continue\n for jp, tri in enumerate(triset):\n name = '{}-{}'.format(obj.original.name, jp)\n vertices = np.array(tri.vertices)\n normal = np.float32(tri.normals[0] / np.linalg.norm(tri.normals[0]))\n vert_x, vert_y, normal_nig = vert_2d(normal, vertices)\n area = np.float64(triangle_area(vertices))\n centroid = np.float32(triangle_centroid(vertices))\n alpha_v = np.float32(alpha[jp])\n plane = ra_cpp.Planecpp(name, False, vertices, normal, vert_x, vert_y, normal_nig, area, centroid, alpha_v, s[jp])\n self.planes.append(plane)\n self.total_area = total_area(self.planes)\n self.volume = volume(self.planes)\n<|end_body_0|>\n\n<|body_start_1|>\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n for plane in self.planes:\n ax.scatter(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2], color='blue')\n verts = [list(zip(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2]))]\n collection = Poly3DCollection(verts, linewidths=1, alpha=0.5, edgecolor='gray')\n face_color = 'silver'\n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n if normals == 'on':\n ax.quiver(plane.centroid[0], plane.centroid[1], plane.centroid[2], plane.normal[0], plane.normal[1], plane.normal[2], length=1, color='red', normalize=True)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n plt.show()\n<|end_body_1|>\n", "revision_id": "345be1496d2c3f59f0aaa3594977f495c80f50e3", "skeleton": "<|skeleton|>\nclass Geometry:\n\n def __init__(self, geo_cfg, alpha, s):\n \"\"\"Set up the room geometry from the .dae file Geometry consists of: Volume, Total ara and an array of plane objects. Each plane object will be processed in a c++ class and have the following att: - name (string) - bounding box (bool) - list of vertices (Eigen - Nvert x 3) - normal (Eigen - 1 x 3) - vert_x - 2D polygon x coord (Eigen - 1 x Nvert) - vert_y - 2D polygon y coord (Eigen - 1 x Nvert) - nig - 2D normal components index (Eigen - 1 x 2) - area (double) - centroid (Eigen - 1 x 3) - alpha - absorption coefficient (Eigen - 1 x Nfreq) - s - scattering coefficient (double)\"\"\"\n <|body_0|>\n\n def plot_dae_room(self, normals='off'):\n \"\"\"a simple plot of the room - not redered\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Geometry:\n def __init__(self, geo_cfg, alpha, s):\n \"\"\"Set up the room geometry from the .dae file Geometry consists of: Volume, Total ara and an array of plane objects. Each plane object will be processed in a c++ class and have the following att: - name (string) - bounding box (bool) - list of vertices (Eigen - Nvert x 3) - normal (Eigen - 1 x 3) - vert_x - 2D polygon x coord (Eigen - 1 x Nvert) - vert_y - 2D polygon y coord (Eigen - 1 x Nvert) - nig - 2D normal components index (Eigen - 1 x 2) - area (double) - centroid (Eigen - 1 x 3) - alpha - absorption coefficient (Eigen - 1 x Nfreq) - s - scattering coefficient (double)\"\"\"\n daepath = geo_cfg['room']\n self.planes = []\n mesh = co.Collada(daepath)\n for obj in mesh.scene.objects('geometry'):\n for triset in obj.primitives():\n if type(triset) != co.triangleset.BoundTriangleSet:\n log.info('Warning: non-supported primitive ignored!')\n continue\n for jp, tri in enumerate(triset):\n name = '{}-{}'.format(obj.original.name, jp)\n vertices = np.array(tri.vertices)\n normal = np.float32(tri.normals[0] / np.linalg.norm(tri.normals[0]))\n vert_x, vert_y, normal_nig = vert_2d(normal, vertices)\n area = np.float64(triangle_area(vertices))\n centroid = np.float32(triangle_centroid(vertices))\n alpha_v = np.float32(alpha[jp])\n plane = ra_cpp.Planecpp(name, False, vertices, normal, vert_x, vert_y, normal_nig, area, centroid, alpha_v, s[jp])\n self.planes.append(plane)\n self.total_area = total_area(self.planes)\n self.volume = volume(self.planes)\n\n def plot_dae_room(self, normals='off'):\n \"\"\"a simple plot of the room - not redered\"\"\"\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n for plane in self.planes:\n ax.scatter(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2], color='blue')\n verts = [list(zip(plane.vertices[:, 0], plane.vertices[:, 1], plane.vertices[:, 2]))]\n collection = Poly3DCollection(verts, linewidths=1, alpha=0.5, edgecolor='gray')\n face_color = 'silver'\n collection.set_facecolor(face_color)\n ax.add_collection3d(collection)\n if normals == 'on':\n ax.quiver(plane.centroid[0], plane.centroid[1], plane.centroid[2], plane.normal[0], plane.normal[1], plane.normal[2], length=1, color='red', normalize=True)\n ax.set_xlabel('X axis')\n ax.set_ylabel('Y axis')\n ax.set_zlabel('Z axis')\n plt.show()\n", "source": "the_stack_v2_python_sparse", "source_path": "ra/room.py", "source_repo": "pokjnb/ra", "split": "test", "star_events_count": 0} {"blob_id": "48c465e44c706d49317aa6a4b55332d23166d421", "bodies": ["super(CNN, self).__init__()\nself.input_channel_count = input_channel_count\nself.output_channel_count = output_channel_count\nself.conv = nn.Conv1d(in_channels=input_channel_count, out_channels=output_channel_count, kernel_size=kernel_size)\nself.max_pool = nn.AdaptiveMaxPool1d(output_size=1)\nself.relu = nn.ReLU()", "assert x_reshaped.size()[1] == self.input_channel_count, print(f'{x_reshaped.size()} x_reshaped size')\nx_conv = self.conv(x_reshaped)\nassert x_conv.size()[1] == self.output_channel_count, print(f'{x_conv.size()} x_conv size')\nx_conv_out = self.max_pool(x_conv).squeeze(dim=2)\nassert x_conv_out.size()[1] == self.output_channel_count, print(f'{x_conv_out.size()} x_conv_out size')\nreturn x_conv_out"], "bodies_text": "<|body_start_0|>\n super(CNN, self).__init__()\n self.input_channel_count = input_channel_count\n self.output_channel_count = output_channel_count\n self.conv = nn.Conv1d(in_channels=input_channel_count, out_channels=output_channel_count, kernel_size=kernel_size)\n self.max_pool = nn.AdaptiveMaxPool1d(output_size=1)\n self.relu = nn.ReLU()\n<|end_body_0|>\n\n<|body_start_1|>\n assert x_reshaped.size()[1] == self.input_channel_count, print(f'{x_reshaped.size()} x_reshaped size')\n x_conv = self.conv(x_reshaped)\n assert x_conv.size()[1] == self.output_channel_count, print(f'{x_conv.size()} x_conv size')\n x_conv_out = self.max_pool(x_conv).squeeze(dim=2)\n assert x_conv_out.size()[1] == self.output_channel_count, print(f'{x_conv_out.size()} x_conv_out size')\n return x_conv_out\n<|end_body_1|>\n", "class_docstring": "CNN Layer, i.e. a layer of cnn network that takes the output of convolutional network as input", "class_name": "CNN", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CNN:\n \"\"\"CNN Layer, i.e. a layer of cnn network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, input_channel_count, output_channel_count, kernel_size=5):\n \"\"\"Init HighWay Instance. @param input_channel_count: int @param output_channel_count: int @param kernel_size: int\"\"\"\n <|body_0|>\n\n def forward(self, x_reshaped):\n \"\"\"Run a forward step that map a batch of x_reshaped to x_conv_out @param x_reshaped: tensor of (max_sentence_length * batch_size, e_char, max_word_length) @return x_conv_out: tensor of (max_sentence_length * batch_size, e_word)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CNN, self).__init__()\n self.input_channel_count = input_channel_count\n self.output_channel_count = output_channel_count\n self.conv = nn.Conv1d(in_channels=input_channel_count, out_channels=output_channel_count, kernel_size=kernel_size)\n self.max_pool = nn.AdaptiveMaxPool1d(output_size=1)\n self.relu = nn.ReLU()\n<|end_body_0|>\n\n<|body_start_1|>\n assert x_reshaped.size()[1] == self.input_channel_count, print(f'{x_reshaped.size()} x_reshaped size')\n x_conv = self.conv(x_reshaped)\n assert x_conv.size()[1] == self.output_channel_count, print(f'{x_conv.size()} x_conv size')\n x_conv_out = self.max_pool(x_conv).squeeze(dim=2)\n assert x_conv_out.size()[1] == self.output_channel_count, print(f'{x_conv_out.size()} x_conv_out size')\n return x_conv_out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000295", "length_bytes": 2032, "license_type": "no_license", "methods": [{"docstring": "Init HighWay Instance. @param input_channel_count: int @param output_channel_count: int @param kernel_size: int", "name": "__init__", "signature": "def __init__(self, input_channel_count, output_channel_count, kernel_size=5)"}, {"docstring": "Run a forward step that map a batch of x_reshaped to x_conv_out @param x_reshaped: tensor of (max_sentence_length * batch_size, e_char, max_word_length) @return x_conv_out: tensor of (max_sentence_length * batch_size, e_word)", "name": "forward", "signature": "def forward(self, x_reshaped)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_053354", "prompt": "Implement the Python class `CNN` described below.\n\nClass description:\nCNN Layer, i.e. a layer of cnn network that takes the output of convolutional network as input\n\nMethod signatures and docstrings:\n- def __init__(self, input_channel_count, output_channel_count, kernel_size=5): Init HighWay Instance. @param input_channel_count: int @param output_channel_count: int @param kernel_size: int\n- def forward(self, x_reshaped): Run a forward step that map a batch of x_reshaped to x_conv_out @param x_reshaped: tensor of (max_sentence_length * batch_size, e_char, max_word_length) @return x_conv_out: tensor of (max_sentence_length * batch_size, e_word)", "prompted_full_text": "Implement the Python class `CNN` described below.\n\nClass description:\nCNN Layer, i.e. a layer of cnn network that takes the output of convolutional network as input\n\nMethod signatures and docstrings:\n- def __init__(self, input_channel_count, output_channel_count, kernel_size=5): Init HighWay Instance. @param input_channel_count: int @param output_channel_count: int @param kernel_size: int\n- def forward(self, x_reshaped): Run a forward step that map a batch of x_reshaped to x_conv_out @param x_reshaped: tensor of (max_sentence_length * batch_size, e_char, max_word_length) @return x_conv_out: tensor of (max_sentence_length * batch_size, e_word)\n\n<|skeleton|>\nclass CNN:\n \"\"\"CNN Layer, i.e. a layer of cnn network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, input_channel_count, output_channel_count, kernel_size=5):\n \"\"\"Init HighWay Instance. @param input_channel_count: int @param output_channel_count: int @param kernel_size: int\"\"\"\n <|body_0|>\n\n def forward(self, x_reshaped):\n \"\"\"Run a forward step that map a batch of x_reshaped to x_conv_out @param x_reshaped: tensor of (max_sentence_length * batch_size, e_char, max_word_length) @return x_conv_out: tensor of (max_sentence_length * batch_size, e_word)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CNN, self).__init__()\n self.input_channel_count = input_channel_count\n self.output_channel_count = output_channel_count\n self.conv = nn.Conv1d(in_channels=input_channel_count, out_channels=output_channel_count, kernel_size=kernel_size)\n self.max_pool = nn.AdaptiveMaxPool1d(output_size=1)\n self.relu = nn.ReLU()\n<|end_body_0|>\n\n<|body_start_1|>\n assert x_reshaped.size()[1] == self.input_channel_count, print(f'{x_reshaped.size()} x_reshaped size')\n x_conv = self.conv(x_reshaped)\n assert x_conv.size()[1] == self.output_channel_count, print(f'{x_conv.size()} x_conv size')\n x_conv_out = self.max_pool(x_conv).squeeze(dim=2)\n assert x_conv_out.size()[1] == self.output_channel_count, print(f'{x_conv_out.size()} x_conv_out size')\n return x_conv_out\n<|end_body_1|>\n", "revision_id": "a883935d779dca3a3cc443c3fa6d6a455f21e87a", "skeleton": "<|skeleton|>\nclass CNN:\n \"\"\"CNN Layer, i.e. a layer of cnn network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, input_channel_count, output_channel_count, kernel_size=5):\n \"\"\"Init HighWay Instance. @param input_channel_count: int @param output_channel_count: int @param kernel_size: int\"\"\"\n <|body_0|>\n\n def forward(self, x_reshaped):\n \"\"\"Run a forward step that map a batch of x_reshaped to x_conv_out @param x_reshaped: tensor of (max_sentence_length * batch_size, e_char, max_word_length) @return x_conv_out: tensor of (max_sentence_length * batch_size, e_word)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CNN:\n \"\"\"CNN Layer, i.e. a layer of cnn network that takes the output of convolutional network as input\"\"\"\n\n def __init__(self, input_channel_count, output_channel_count, kernel_size=5):\n \"\"\"Init HighWay Instance. @param input_channel_count: int @param output_channel_count: int @param kernel_size: int\"\"\"\n super(CNN, self).__init__()\n self.input_channel_count = input_channel_count\n self.output_channel_count = output_channel_count\n self.conv = nn.Conv1d(in_channels=input_channel_count, out_channels=output_channel_count, kernel_size=kernel_size)\n self.max_pool = nn.AdaptiveMaxPool1d(output_size=1)\n self.relu = nn.ReLU()\n\n def forward(self, x_reshaped):\n \"\"\"Run a forward step that map a batch of x_reshaped to x_conv_out @param x_reshaped: tensor of (max_sentence_length * batch_size, e_char, max_word_length) @return x_conv_out: tensor of (max_sentence_length * batch_size, e_word)\"\"\"\n assert x_reshaped.size()[1] == self.input_channel_count, print(f'{x_reshaped.size()} x_reshaped size')\n x_conv = self.conv(x_reshaped)\n assert x_conv.size()[1] == self.output_channel_count, print(f'{x_conv.size()} x_conv size')\n x_conv_out = self.max_pool(x_conv).squeeze(dim=2)\n assert x_conv_out.size()[1] == self.output_channel_count, print(f'{x_conv_out.size()} x_conv_out size')\n return x_conv_out\n", "source": "the_stack_v2_python_sparse", "source_path": "stanford_nlp/a5/cnn.py", "source_repo": "guocongyun/ml-projects", "split": "test", "star_events_count": 0} {"blob_id": "9ebdb8f5958731d0b5b8e6d9c84a026d4ec6f6ab", "bodies": ["out_x = u_x * scale_x + u_y * scale_y + shift_x\nout_y = u_x * scale_y + u_y * scale_x + shift_y\nreturn (out_x, out_y)", "u_r, u_d = get_real_and_double(u)\nout = ops.abs(u_r) + ops.abs(u_d)\nreturn out", "norm = self.get_norm(u)\nout = norm ** 2\nreturn out"], "bodies_text": "<|body_start_0|>\n out_x = u_x * scale_x + u_y * scale_y + shift_x\n out_y = u_x * scale_y + u_y * scale_x + shift_y\n return (out_x, out_y)\n<|end_body_0|>\n\n<|body_start_1|>\n u_r, u_d = get_real_and_double(u)\n out = ops.abs(u_r) + ops.abs(u_d)\n return out\n<|end_body_1|>\n\n<|body_start_2|>\n norm = self.get_norm(u)\n out = norm ** 2\n return out\n<|end_body_2|>\n", "class_docstring": "The implementor class of the Batch Normalization layer for double numbers in regular representation. Implements the functionality specific to double numbers and needed by the 'BatchNorm' class. This includes: getting the norm of double number, applying scaling and shift to a double-valued tensor, and updating the running mean and variance, which are used during inference. Args: affine (bool) - A bool value. When set to True, gamma and beta can be learned. use_batch_statistics (bool): If true, use the mean value and variance value of current batch data. If false, use the mean value and variance value of specified value. If None, the training process will use the mean and variance of current b", "class_name": "_BatchNormImpl", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-proprietary-license", "MPL-1.0", "OpenSSL", "LGPL-3.0-only", "LicenseRef-scancode-warranty-disclaimer", "BSD-3-Clause-Open-MPI", "MIT", "MPL-2.0-no-copyleft-exception", "NTP", "BSD-3-Clause", "GPL-1.0-or-later", "0BSD", "MPL-2.0", "LicenseRef-scancode-free-unknown", "AGPL-3.0-only", "Libpng", "MPL-1.1", "IJG", "GPL-2.0-only", "BSL-1.0", "Zlib", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-python-cwi", "BSD-2-Clause", "LicenseRef-scancode-gary-s-brown", "LGPL-2.1-only", "LicenseRef-scancode-other-permissive", "Python-2.0", "LicenseRef-scancode-mit-nagy", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _BatchNormImpl:\n \"\"\"The implementor class of the Batch Normalization layer for double numbers in regular representation. Implements the functionality specific to double numbers and needed by the 'BatchNorm' class. This includes: getting the norm of double number, applying scaling and shift to a double-valued tensor, and updating the running mean and variance, which are used during inference. Args: affine (bool) - A bool value. When set to True, gamma and beta can be learned. use_batch_statistics (bool): If true, use the mean value and variance value of current batch data. If false, use the mean value and variance value of specified value. If None, the training process will use the mean and variance of current b\"\"\"\n\n def scale_and_shift(self, u_x: Tensor, u_y: Tensor, scale_x: Tensor, scale_y: Tensor, shift_x: Tensor, shift_y: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Applies double scaling and shift to an input tensor in regular representation. This function implements the operation as: .. math:: \\\\begin{align} \\\\text{Re(out)} = \\\\text{Re(inp)} * \\\\text{Re(scale)} + \\\\text{Db(inp)} * \\\\text{Db(scale)} + \\\\text{Re(shift)}\\\\\\\\ \\\\text{Db(out)} = \\\\text{Re(inp)} * \\\\text{Db(scale)} + \\\\text{Db(inp)} * \\\\text{Re(scale)} + \\\\text{Db(shift)}, \\\\end{align} where :math:`inp` is the double input tensors, :math:`scale` and :math:`shift` are double parameters representing the scaling and shift coefficients respectively. :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. A\"\"\"\n <|body_0|>\n\n def get_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = |Re(inp)| + |Db(inp)|, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has two components. Returns: Tensor of shape (*, ..., *). The count and size of dimensions of\"\"\"\n <|body_1|>\n\n def get_square_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates element-wise squared norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = \\\\left(|Re(inp)| + |Db(inp)|\\\\right)^2, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Du(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has a real and a double parts. Returns: Tensor of shape\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n out_x = u_x * scale_x + u_y * scale_y + shift_x\n out_y = u_x * scale_y + u_y * scale_x + shift_y\n return (out_x, out_y)\n<|end_body_0|>\n\n<|body_start_1|>\n u_r, u_d = get_real_and_double(u)\n out = ops.abs(u_r) + ops.abs(u_d)\n return out\n<|end_body_1|>\n\n<|body_start_2|>\n norm = self.get_norm(u)\n out = norm ** 2\n return out\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000296", "length_bytes": 12894, "license_type": "permissive", "methods": [{"docstring": "Applies double scaling and shift to an input tensor in regular representation. This function implements the operation as: .. math:: \\\\begin{align} \\\\text{Re(out)} = \\\\text{Re(inp)} * \\\\text{Re(scale)} + \\\\text{Db(inp)} * \\\\text{Db(scale)} + \\\\text{Re(shift)}\\\\\\\\ \\\\text{Db(out)} = \\\\text{Re(inp)} * \\\\text{Db(scale)} + \\\\text{Db(inp)} * \\\\text{Re(scale)} + \\\\text{Db(shift)}, \\\\end{align} where :math:`inp` is the double input tensors, :math:`scale` and :math:`shift` are double parameters representing the scaling and shift coefficients respectively. :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. A", "name": "scale_and_shift", "signature": "def scale_and_shift(self, u_x: Tensor, u_y: Tensor, scale_x: Tensor, scale_y: Tensor, shift_x: Tensor, shift_y: Tensor) -> Tuple[Tensor, Tensor]"}, {"docstring": "Calculates norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = |Re(inp)| + |Db(inp)|, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has two components. Returns: Tensor of shape (*, ..., *). The count and size of dimensions of", "name": "get_norm", "signature": "def get_norm(self, u: Tensor) -> Tensor"}, {"docstring": "Calculates element-wise squared norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = \\\\left(|Re(inp)| + |Db(inp)|\\\\right)^2, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Du(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has a real and a double parts. Returns: Tensor of shape", "name": "get_square_norm", "signature": "def get_square_norm(self, u: Tensor) -> Tensor"}], "n_methods": 3, "prompt": "Implement the Python class `_BatchNormImpl` described below.\n\nClass description:\nThe implementor class of the Batch Normalization layer for double numbers in regular representation. Implements the functionality specific to double numbers and needed by the 'BatchNorm' class. This includes: getting the norm of double number, applying scaling and shift to a double-valued tensor, and updating the running mean and variance, which are used during inference. Args: affine (bool) - A bool value. When set to True, gamma and beta can be learned. use_batch_statistics (bool): If true, use the mean value and variance value of current batch data. If false, use the mean value and variance value of specified value. If None, the training process will use the mean and variance of current b\n\nMethod signatures and docstrings:\n- def scale_and_shift(self, u_x: Tensor, u_y: Tensor, scale_x: Tensor, scale_y: Tensor, shift_x: Tensor, shift_y: Tensor) -> Tuple[Tensor, Tensor]: Applies double scaling and shift to an input tensor in regular representation. This function implements the operation as: .. math:: \\\\begin{align} \\\\text{Re(out)} = \\\\text{Re(inp)} * \\\\text{Re(scale)} + \\\\text{Db(inp)} * \\\\text{Db(scale)} + \\\\text{Re(shift)}\\\\\\\\ \\\\text{Db(out)} = \\\\text{Re(inp)} * \\\\text{Db(scale)} + \\\\text{Db(inp)} * \\\\text{Re(scale)} + \\\\text{Db(shift)}, \\\\end{align} where :math:`inp` is the double input tensors, :math:`scale` and :math:`shift` are double parameters representing the scaling and shift coefficients respectively. :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. A\n- def get_norm(self, u: Tensor) -> Tensor: Calculates norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = |Re(inp)| + |Db(inp)|, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has two components. Returns: Tensor of shape (*, ..., *). The count and size of dimensions of\n- def get_square_norm(self, u: Tensor) -> Tensor: Calculates element-wise squared norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = \\\\left(|Re(inp)| + |Db(inp)|\\\\right)^2, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Du(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has a real and a double parts. Returns: Tensor of shape", "prompted_full_text": "Implement the Python class `_BatchNormImpl` described below.\n\nClass description:\nThe implementor class of the Batch Normalization layer for double numbers in regular representation. Implements the functionality specific to double numbers and needed by the 'BatchNorm' class. This includes: getting the norm of double number, applying scaling and shift to a double-valued tensor, and updating the running mean and variance, which are used during inference. Args: affine (bool) - A bool value. When set to True, gamma and beta can be learned. use_batch_statistics (bool): If true, use the mean value and variance value of current batch data. If false, use the mean value and variance value of specified value. If None, the training process will use the mean and variance of current b\n\nMethod signatures and docstrings:\n- def scale_and_shift(self, u_x: Tensor, u_y: Tensor, scale_x: Tensor, scale_y: Tensor, shift_x: Tensor, shift_y: Tensor) -> Tuple[Tensor, Tensor]: Applies double scaling and shift to an input tensor in regular representation. This function implements the operation as: .. math:: \\\\begin{align} \\\\text{Re(out)} = \\\\text{Re(inp)} * \\\\text{Re(scale)} + \\\\text{Db(inp)} * \\\\text{Db(scale)} + \\\\text{Re(shift)}\\\\\\\\ \\\\text{Db(out)} = \\\\text{Re(inp)} * \\\\text{Db(scale)} + \\\\text{Db(inp)} * \\\\text{Re(scale)} + \\\\text{Db(shift)}, \\\\end{align} where :math:`inp` is the double input tensors, :math:`scale` and :math:`shift` are double parameters representing the scaling and shift coefficients respectively. :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. A\n- def get_norm(self, u: Tensor) -> Tensor: Calculates norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = |Re(inp)| + |Db(inp)|, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has two components. Returns: Tensor of shape (*, ..., *). The count and size of dimensions of\n- def get_square_norm(self, u: Tensor) -> Tensor: Calculates element-wise squared norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = \\\\left(|Re(inp)| + |Db(inp)|\\\\right)^2, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Du(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has a real and a double parts. Returns: Tensor of shape\n\n<|skeleton|>\nclass _BatchNormImpl:\n \"\"\"The implementor class of the Batch Normalization layer for double numbers in regular representation. Implements the functionality specific to double numbers and needed by the 'BatchNorm' class. This includes: getting the norm of double number, applying scaling and shift to a double-valued tensor, and updating the running mean and variance, which are used during inference. Args: affine (bool) - A bool value. When set to True, gamma and beta can be learned. use_batch_statistics (bool): If true, use the mean value and variance value of current batch data. If false, use the mean value and variance value of specified value. If None, the training process will use the mean and variance of current b\"\"\"\n\n def scale_and_shift(self, u_x: Tensor, u_y: Tensor, scale_x: Tensor, scale_y: Tensor, shift_x: Tensor, shift_y: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Applies double scaling and shift to an input tensor in regular representation. This function implements the operation as: .. math:: \\\\begin{align} \\\\text{Re(out)} = \\\\text{Re(inp)} * \\\\text{Re(scale)} + \\\\text{Db(inp)} * \\\\text{Db(scale)} + \\\\text{Re(shift)}\\\\\\\\ \\\\text{Db(out)} = \\\\text{Re(inp)} * \\\\text{Db(scale)} + \\\\text{Db(inp)} * \\\\text{Re(scale)} + \\\\text{Db(shift)}, \\\\end{align} where :math:`inp` is the double input tensors, :math:`scale` and :math:`shift` are double parameters representing the scaling and shift coefficients respectively. :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. A\"\"\"\n <|body_0|>\n\n def get_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = |Re(inp)| + |Db(inp)|, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has two components. Returns: Tensor of shape (*, ..., *). The count and size of dimensions of\"\"\"\n <|body_1|>\n\n def get_square_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates element-wise squared norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = \\\\left(|Re(inp)| + |Db(inp)|\\\\right)^2, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Du(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has a real and a double parts. Returns: Tensor of shape\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n out_x = u_x * scale_x + u_y * scale_y + shift_x\n out_y = u_x * scale_y + u_y * scale_x + shift_y\n return (out_x, out_y)\n<|end_body_0|>\n\n<|body_start_1|>\n u_r, u_d = get_real_and_double(u)\n out = ops.abs(u_r) + ops.abs(u_d)\n return out\n<|end_body_1|>\n\n<|body_start_2|>\n norm = self.get_norm(u)\n out = norm ** 2\n return out\n<|end_body_2|>\n", "revision_id": "54acb15d435533c815ee1bd9f6dc0b56b4d4cf83", "skeleton": "<|skeleton|>\nclass _BatchNormImpl:\n \"\"\"The implementor class of the Batch Normalization layer for double numbers in regular representation. Implements the functionality specific to double numbers and needed by the 'BatchNorm' class. This includes: getting the norm of double number, applying scaling and shift to a double-valued tensor, and updating the running mean and variance, which are used during inference. Args: affine (bool) - A bool value. When set to True, gamma and beta can be learned. use_batch_statistics (bool): If true, use the mean value and variance value of current batch data. If false, use the mean value and variance value of specified value. If None, the training process will use the mean and variance of current b\"\"\"\n\n def scale_and_shift(self, u_x: Tensor, u_y: Tensor, scale_x: Tensor, scale_y: Tensor, shift_x: Tensor, shift_y: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Applies double scaling and shift to an input tensor in regular representation. This function implements the operation as: .. math:: \\\\begin{align} \\\\text{Re(out)} = \\\\text{Re(inp)} * \\\\text{Re(scale)} + \\\\text{Db(inp)} * \\\\text{Db(scale)} + \\\\text{Re(shift)}\\\\\\\\ \\\\text{Db(out)} = \\\\text{Re(inp)} * \\\\text{Db(scale)} + \\\\text{Db(inp)} * \\\\text{Re(scale)} + \\\\text{Db(shift)}, \\\\end{align} where :math:`inp` is the double input tensors, :math:`scale` and :math:`shift` are double parameters representing the scaling and shift coefficients respectively. :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. A\"\"\"\n <|body_0|>\n\n def get_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = |Re(inp)| + |Db(inp)|, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has two components. Returns: Tensor of shape (*, ..., *). The count and size of dimensions of\"\"\"\n <|body_1|>\n\n def get_square_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates element-wise squared norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = \\\\left(|Re(inp)| + |Db(inp)|\\\\right)^2, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Du(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has a real and a double parts. Returns: Tensor of shape\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class _BatchNormImpl:\n \"\"\"The implementor class of the Batch Normalization layer for double numbers in regular representation. Implements the functionality specific to double numbers and needed by the 'BatchNorm' class. This includes: getting the norm of double number, applying scaling and shift to a double-valued tensor, and updating the running mean and variance, which are used during inference. Args: affine (bool) - A bool value. When set to True, gamma and beta can be learned. use_batch_statistics (bool): If true, use the mean value and variance value of current batch data. If false, use the mean value and variance value of specified value. If None, the training process will use the mean and variance of current b\"\"\"\n\n def scale_and_shift(self, u_x: Tensor, u_y: Tensor, scale_x: Tensor, scale_y: Tensor, shift_x: Tensor, shift_y: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Applies double scaling and shift to an input tensor in regular representation. This function implements the operation as: .. math:: \\\\begin{align} \\\\text{Re(out)} = \\\\text{Re(inp)} * \\\\text{Re(scale)} + \\\\text{Db(inp)} * \\\\text{Db(scale)} + \\\\text{Re(shift)}\\\\\\\\ \\\\text{Db(out)} = \\\\text{Re(inp)} * \\\\text{Db(scale)} + \\\\text{Db(inp)} * \\\\text{Re(scale)} + \\\\text{Db(shift)}, \\\\end{align} where :math:`inp` is the double input tensors, :math:`scale` and :math:`shift` are double parameters representing the scaling and shift coefficients respectively. :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. A\"\"\"\n out_x = u_x * scale_x + u_y * scale_y + shift_x\n out_y = u_x * scale_y + u_y * scale_x + shift_y\n return (out_x, out_y)\n\n def get_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = |Re(inp)| + |Db(inp)|, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Db(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has two components. Returns: Tensor of shape (*, ..., *). The count and size of dimensions of\"\"\"\n u_r, u_d = get_real_and_double(u)\n out = ops.abs(u_r) + ops.abs(u_d)\n return out\n\n def get_square_norm(self, u: Tensor) -> Tensor:\n \"\"\"Calculates element-wise squared norm of double elements of an input tensor in regular representation. Norm is a non-negative real number that is a characteristic of 'magnitude' of that number, i.e. how far away it is from zero. The function implements the operation as: .. math:: \\\\text{out} = \\\\left(|Re(inp)| + |Db(inp)|\\\\right)^2, where :math:`inp` is the double input tensors, :math:`\\\\text{Re(...)}` and :math:`\\\\text{Du(...)}` are respectively real and double parts of the double-valued expression inside the parentheses. Args: u (Tensor): Tensor of shape (2, *, ..., *). '2' denotes that the input tensor belongs to the double domain and has a real and a double parts. Returns: Tensor of shape\"\"\"\n norm = self.get_norm(u)\n out = norm ** 2\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "mindspore/python/mindspore/hypercomplex/double/_double_bn_impl.py", "source_repo": "mindspore-ai/mindspore", "split": "test", "star_events_count": 4178} {"blob_id": "f6cdb0aa12295975268147fcc0a0e13637d9374d", "bodies": ["rs = self._get_r()\nts = self._get_t()\n\ndef mk_xs(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.cos(t)\nreturn mk_xs", "rs = self._get_r()\nts = self._get_t()\n\ndef mk_ys(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.sin(t)\nreturn mk_ys"], "bodies_text": "<|body_start_0|>\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_xs(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.cos(t)\n return mk_xs\n<|end_body_0|>\n\n<|body_start_1|>\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_ys(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.sin(t)\n return mk_ys\n<|end_body_1|>\n", "class_docstring": "A helper class for defining new domain classes. Say you are implementing a new class, perhaps the :code:`Rotation` domain transform where the interesting part is the adjustments to the :code:`r` and :code:`t` coordinates. The implementation of :code:`x` and :code:`y` is then done with respect to the transformed :code:`r` and :code:`t` variables. This class provides implementations of the :code:`_get_xs` and :code:`_get_ys` methods to handle this conversion for you automatically, leaving you to focus on implementing the transform that interest you. To use this class, simply include it in your :code:`class` definition as follows .. code-block:: python class MyDomain(CartesianConversion, RealDo", "class_name": "CartesianConversion", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CartesianConversion:\n \"\"\"A helper class for defining new domain classes. Say you are implementing a new class, perhaps the :code:`Rotation` domain transform where the interesting part is the adjustments to the :code:`r` and :code:`t` coordinates. The implementation of :code:`x` and :code:`y` is then done with respect to the transformed :code:`r` and :code:`t` variables. This class provides implementations of the :code:`_get_xs` and :code:`_get_ys` methods to handle this conversion for you automatically, leaving you to focus on implementing the transform that interest you. To use this class, simply include it in your :code:`class` definition as follows .. code-block:: python class MyDomain(CartesianConversion, RealDo\"\"\"\n\n def _get_x(self):\n \"\"\"The conversion to the :math:`x` component in terms of :math:`r` and :math:`\\\\theta` .. math:: x = r\\\\cos{(\\\\theta)}\"\"\"\n <|body_0|>\n\n def _get_y(self):\n \"\"\"The conversion to the :math:`y` component in terms of :math:`r` and :math:`\\\\theta` .. math:: y = r\\\\sin{(\\\\theta)}\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_xs(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.cos(t)\n return mk_xs\n<|end_body_0|>\n\n<|body_start_1|>\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_ys(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.sin(t)\n return mk_ys\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000297", "length_bytes": 3620, "license_type": "permissive", "methods": [{"docstring": "The conversion to the :math:`x` component in terms of :math:`r` and :math:`\\\\theta` .. math:: x = r\\\\cos{(\\\\theta)}", "name": "_get_x", "signature": "def _get_x(self)"}, {"docstring": "The conversion to the :math:`y` component in terms of :math:`r` and :math:`\\\\theta` .. math:: y = r\\\\sin{(\\\\theta)}", "name": "_get_y", "signature": "def _get_y(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031850", "prompt": "Implement the Python class `CartesianConversion` described below.\n\nClass description:\nA helper class for defining new domain classes. Say you are implementing a new class, perhaps the :code:`Rotation` domain transform where the interesting part is the adjustments to the :code:`r` and :code:`t` coordinates. The implementation of :code:`x` and :code:`y` is then done with respect to the transformed :code:`r` and :code:`t` variables. This class provides implementations of the :code:`_get_xs` and :code:`_get_ys` methods to handle this conversion for you automatically, leaving you to focus on implementing the transform that interest you. To use this class, simply include it in your :code:`class` definition as follows .. code-block:: python class MyDomain(CartesianConversion, RealDo\n\nMethod signatures and docstrings:\n- def _get_x(self): The conversion to the :math:`x` component in terms of :math:`r` and :math:`\\\\theta` .. math:: x = r\\\\cos{(\\\\theta)}\n- def _get_y(self): The conversion to the :math:`y` component in terms of :math:`r` and :math:`\\\\theta` .. math:: y = r\\\\sin{(\\\\theta)}", "prompted_full_text": "Implement the Python class `CartesianConversion` described below.\n\nClass description:\nA helper class for defining new domain classes. Say you are implementing a new class, perhaps the :code:`Rotation` domain transform where the interesting part is the adjustments to the :code:`r` and :code:`t` coordinates. The implementation of :code:`x` and :code:`y` is then done with respect to the transformed :code:`r` and :code:`t` variables. This class provides implementations of the :code:`_get_xs` and :code:`_get_ys` methods to handle this conversion for you automatically, leaving you to focus on implementing the transform that interest you. To use this class, simply include it in your :code:`class` definition as follows .. code-block:: python class MyDomain(CartesianConversion, RealDo\n\nMethod signatures and docstrings:\n- def _get_x(self): The conversion to the :math:`x` component in terms of :math:`r` and :math:`\\\\theta` .. math:: x = r\\\\cos{(\\\\theta)}\n- def _get_y(self): The conversion to the :math:`y` component in terms of :math:`r` and :math:`\\\\theta` .. math:: y = r\\\\sin{(\\\\theta)}\n\n<|skeleton|>\nclass CartesianConversion:\n \"\"\"A helper class for defining new domain classes. Say you are implementing a new class, perhaps the :code:`Rotation` domain transform where the interesting part is the adjustments to the :code:`r` and :code:`t` coordinates. The implementation of :code:`x` and :code:`y` is then done with respect to the transformed :code:`r` and :code:`t` variables. This class provides implementations of the :code:`_get_xs` and :code:`_get_ys` methods to handle this conversion for you automatically, leaving you to focus on implementing the transform that interest you. To use this class, simply include it in your :code:`class` definition as follows .. code-block:: python class MyDomain(CartesianConversion, RealDo\"\"\"\n\n def _get_x(self):\n \"\"\"The conversion to the :math:`x` component in terms of :math:`r` and :math:`\\\\theta` .. math:: x = r\\\\cos{(\\\\theta)}\"\"\"\n <|body_0|>\n\n def _get_y(self):\n \"\"\"The conversion to the :math:`y` component in terms of :math:`r` and :math:`\\\\theta` .. math:: y = r\\\\sin{(\\\\theta)}\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_xs(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.cos(t)\n return mk_xs\n<|end_body_0|>\n\n<|body_start_1|>\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_ys(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.sin(t)\n return mk_ys\n<|end_body_1|>\n", "revision_id": "84f3a74cf9cb29c6d24b990dc9a474562114392b", "skeleton": "<|skeleton|>\nclass CartesianConversion:\n \"\"\"A helper class for defining new domain classes. Say you are implementing a new class, perhaps the :code:`Rotation` domain transform where the interesting part is the adjustments to the :code:`r` and :code:`t` coordinates. The implementation of :code:`x` and :code:`y` is then done with respect to the transformed :code:`r` and :code:`t` variables. This class provides implementations of the :code:`_get_xs` and :code:`_get_ys` methods to handle this conversion for you automatically, leaving you to focus on implementing the transform that interest you. To use this class, simply include it in your :code:`class` definition as follows .. code-block:: python class MyDomain(CartesianConversion, RealDo\"\"\"\n\n def _get_x(self):\n \"\"\"The conversion to the :math:`x` component in terms of :math:`r` and :math:`\\\\theta` .. math:: x = r\\\\cos{(\\\\theta)}\"\"\"\n <|body_0|>\n\n def _get_y(self):\n \"\"\"The conversion to the :math:`y` component in terms of :math:`r` and :math:`\\\\theta` .. math:: y = r\\\\sin{(\\\\theta)}\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CartesianConversion:\n \"\"\"A helper class for defining new domain classes. Say you are implementing a new class, perhaps the :code:`Rotation` domain transform where the interesting part is the adjustments to the :code:`r` and :code:`t` coordinates. The implementation of :code:`x` and :code:`y` is then done with respect to the transformed :code:`r` and :code:`t` variables. This class provides implementations of the :code:`_get_xs` and :code:`_get_ys` methods to handle this conversion for you automatically, leaving you to focus on implementing the transform that interest you. To use this class, simply include it in your :code:`class` definition as follows .. code-block:: python class MyDomain(CartesianConversion, RealDo\"\"\"\n\n def _get_x(self):\n \"\"\"The conversion to the :math:`x` component in terms of :math:`r` and :math:`\\\\theta` .. math:: x = r\\\\cos{(\\\\theta)}\"\"\"\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_xs(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.cos(t)\n return mk_xs\n\n def _get_y(self):\n \"\"\"The conversion to the :math:`y` component in terms of :math:`r` and :math:`\\\\theta` .. math:: y = r\\\\sin{(\\\\theta)}\"\"\"\n rs = self._get_r()\n ts = self._get_t()\n\n def mk_ys(width, height):\n r = rs(width, height)\n t = ts(width, height)\n return r * np.sin(t)\n return mk_ys\n", "source": "the_stack_v2_python_sparse", "source_path": "stylo/domain/helpers.py", "source_repo": "mvinoba/stylo", "split": "test", "star_events_count": 0} {"blob_id": "d688c4654df7234a0de1efe0d70865bf64fa6468", "bodies": ["super(SimpleCnn, self).__init__()\nself.conv_num = 3\nself.conv_layers = nn.ModuleList([None] * self.conv_num)\nself.bn_layers = nn.ModuleList([None] * self.conv_num)\nself.relu_layers = nn.ModuleList([None] * self.conv_num)\nself.pool_layers = nn.ModuleList([None] * self.conv_num)\nconv_layer_names = ['conv_layer_{}'.format(i) for i in range(self.conv_num)]\ninp_filters = 3\nout_size = 32\nfor i, key in enumerate(conv_layer_names):\n out_filters = desc[key]['filters']\n kernel_size = desc[key]['kernel_size']\n padding = (kernel_size - 1) // 2\n self.conv_layers[i] = nn.Conv2d(inp_filters, out_filters, padding=padding, kernel_size=kernel_size)\n if 'bn' in desc[key].keys():\n if desc[key]['bn']:\n self.bn_layers[i] = nn.BatchNorm2d(out_filters)\n if 'relu' in desc[key].keys():\n if desc[key]['relu']:\n self.relu_layers[i] = nn.ReLU(inplace=False)\n self.pool_layers[i] = nn.MaxPool2d(2, stride=2)\n inp_filters = out_filters\n out_size = out_size // 2\nfc_inp_size = inp_filters * out_size * out_size\nfc_out_size = desc['fully_connect']['output_unit']\nself.fc0 = nn.Linear(fc_inp_size, fc_out_size)\nself.fc0_relu = nn.ReLU(inplace=True)\nfc_inp_size = fc_out_size\nfc_out_size = 10\nself.fc1 = nn.Linear(fc_inp_size, fc_out_size)", "for i in range(self.conv_num):\n x = self.conv_layers[i](x)\n if self.bn_layers[i] is not None:\n x = self.bn_layers[i](x)\n if self.relu_layers[i] is not None:\n x = self.relu_layers[i](x)\n x = self.pool_layers[i](x)\nx = self.fc0(x.view(x.size(0), -1))\nx = self.fc0_relu(x)\nx = self.fc1(x)\nreturn x"], "bodies_text": "<|body_start_0|>\n super(SimpleCnn, self).__init__()\n self.conv_num = 3\n self.conv_layers = nn.ModuleList([None] * self.conv_num)\n self.bn_layers = nn.ModuleList([None] * self.conv_num)\n self.relu_layers = nn.ModuleList([None] * self.conv_num)\n self.pool_layers = nn.ModuleList([None] * self.conv_num)\n conv_layer_names = ['conv_layer_{}'.format(i) for i in range(self.conv_num)]\n inp_filters = 3\n out_size = 32\n for i, key in enumerate(conv_layer_names):\n out_filters = desc[key]['filters']\n kernel_size = desc[key]['kernel_size']\n padding = (kernel_size - 1) // 2\n self.conv_layers[i] = nn.Conv2d(inp_filters, out_filters, padding=padding, kernel_size=kernel_size)\n if 'bn' in desc[key].keys():\n if desc[key]['bn']:\n self.bn_layers[i] = nn.BatchNorm2d(out_filters)\n if 'relu' in desc[key].keys():\n if desc[key]['relu']:\n self.relu_layers[i] = nn.ReLU(inplace=False)\n self.pool_layers[i] = nn.MaxPool2d(2, stride=2)\n inp_filters = out_filters\n out_size = out_size // 2\n fc_inp_size = inp_filters * out_size * out_size\n fc_out_size = desc['fully_connect']['output_unit']\n self.fc0 = nn.Linear(fc_inp_size, fc_out_size)\n self.fc0_relu = nn.ReLU(inplace=True)\n fc_inp_size = fc_out_size\n fc_out_size = 10\n self.fc1 = nn.Linear(fc_inp_size, fc_out_size)\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(self.conv_num):\n x = self.conv_layers[i](x)\n if self.bn_layers[i] is not None:\n x = self.bn_layers[i](x)\n if self.relu_layers[i] is not None:\n x = self.relu_layers[i](x)\n x = self.pool_layers[i](x)\n x = self.fc0(x.view(x.size(0), -1))\n x = self.fc0_relu(x)\n x = self.fc1(x)\n return x\n<|end_body_1|>\n", "class_docstring": "Simple CNN network.", "class_name": "SimpleCnn", "detected_licenses": ["Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SimpleCnn:\n \"\"\"Simple CNN network.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SimpleCnn, self).__init__()\n self.conv_num = 3\n self.conv_layers = nn.ModuleList([None] * self.conv_num)\n self.bn_layers = nn.ModuleList([None] * self.conv_num)\n self.relu_layers = nn.ModuleList([None] * self.conv_num)\n self.pool_layers = nn.ModuleList([None] * self.conv_num)\n conv_layer_names = ['conv_layer_{}'.format(i) for i in range(self.conv_num)]\n inp_filters = 3\n out_size = 32\n for i, key in enumerate(conv_layer_names):\n out_filters = desc[key]['filters']\n kernel_size = desc[key]['kernel_size']\n padding = (kernel_size - 1) // 2\n self.conv_layers[i] = nn.Conv2d(inp_filters, out_filters, padding=padding, kernel_size=kernel_size)\n if 'bn' in desc[key].keys():\n if desc[key]['bn']:\n self.bn_layers[i] = nn.BatchNorm2d(out_filters)\n if 'relu' in desc[key].keys():\n if desc[key]['relu']:\n self.relu_layers[i] = nn.ReLU(inplace=False)\n self.pool_layers[i] = nn.MaxPool2d(2, stride=2)\n inp_filters = out_filters\n out_size = out_size // 2\n fc_inp_size = inp_filters * out_size * out_size\n fc_out_size = desc['fully_connect']['output_unit']\n self.fc0 = nn.Linear(fc_inp_size, fc_out_size)\n self.fc0_relu = nn.ReLU(inplace=True)\n fc_inp_size = fc_out_size\n fc_out_size = 10\n self.fc1 = nn.Linear(fc_inp_size, fc_out_size)\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(self.conv_num):\n x = self.conv_layers[i](x)\n if self.bn_layers[i] is not None:\n x = self.bn_layers[i](x)\n if self.relu_layers[i] is not None:\n x = self.relu_layers[i](x)\n x = self.pool_layers[i](x)\n x = self.fc0(x.view(x.size(0), -1))\n x = self.fc0_relu(x)\n x = self.fc1(x)\n return x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000298", "length_bytes": 2930, "license_type": "permissive", "methods": [{"docstring": "Initialize.", "name": "__init__", "signature": "def __init__(self, desc)"}, {"docstring": "Forward.", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 2, "prompt": "Implement the Python class `SimpleCnn` described below.\n\nClass description:\nSimple CNN network.\n\nMethod signatures and docstrings:\n- def __init__(self, desc): Initialize.\n- def forward(self, x): Forward.", "prompted_full_text": "Implement the Python class `SimpleCnn` described below.\n\nClass description:\nSimple CNN network.\n\nMethod signatures and docstrings:\n- def __init__(self, desc): Initialize.\n- def forward(self, x): Forward.\n\n<|skeleton|>\nclass SimpleCnn:\n \"\"\"Simple CNN network.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SimpleCnn, self).__init__()\n self.conv_num = 3\n self.conv_layers = nn.ModuleList([None] * self.conv_num)\n self.bn_layers = nn.ModuleList([None] * self.conv_num)\n self.relu_layers = nn.ModuleList([None] * self.conv_num)\n self.pool_layers = nn.ModuleList([None] * self.conv_num)\n conv_layer_names = ['conv_layer_{}'.format(i) for i in range(self.conv_num)]\n inp_filters = 3\n out_size = 32\n for i, key in enumerate(conv_layer_names):\n out_filters = desc[key]['filters']\n kernel_size = desc[key]['kernel_size']\n padding = (kernel_size - 1) // 2\n self.conv_layers[i] = nn.Conv2d(inp_filters, out_filters, padding=padding, kernel_size=kernel_size)\n if 'bn' in desc[key].keys():\n if desc[key]['bn']:\n self.bn_layers[i] = nn.BatchNorm2d(out_filters)\n if 'relu' in desc[key].keys():\n if desc[key]['relu']:\n self.relu_layers[i] = nn.ReLU(inplace=False)\n self.pool_layers[i] = nn.MaxPool2d(2, stride=2)\n inp_filters = out_filters\n out_size = out_size // 2\n fc_inp_size = inp_filters * out_size * out_size\n fc_out_size = desc['fully_connect']['output_unit']\n self.fc0 = nn.Linear(fc_inp_size, fc_out_size)\n self.fc0_relu = nn.ReLU(inplace=True)\n fc_inp_size = fc_out_size\n fc_out_size = 10\n self.fc1 = nn.Linear(fc_inp_size, fc_out_size)\n<|end_body_0|>\n\n<|body_start_1|>\n for i in range(self.conv_num):\n x = self.conv_layers[i](x)\n if self.bn_layers[i] is not None:\n x = self.bn_layers[i](x)\n if self.relu_layers[i] is not None:\n x = self.relu_layers[i](x)\n x = self.pool_layers[i](x)\n x = self.fc0(x.view(x.size(0), -1))\n x = self.fc0_relu(x)\n x = self.fc1(x)\n return x\n<|end_body_1|>\n", "revision_id": "df51ed9c1d6dbde1deef63f2a037a369f8554406", "skeleton": "<|skeleton|>\nclass SimpleCnn:\n \"\"\"Simple CNN network.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Initialize.\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"Forward.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SimpleCnn:\n \"\"\"Simple CNN network.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Initialize.\"\"\"\n super(SimpleCnn, self).__init__()\n self.conv_num = 3\n self.conv_layers = nn.ModuleList([None] * self.conv_num)\n self.bn_layers = nn.ModuleList([None] * self.conv_num)\n self.relu_layers = nn.ModuleList([None] * self.conv_num)\n self.pool_layers = nn.ModuleList([None] * self.conv_num)\n conv_layer_names = ['conv_layer_{}'.format(i) for i in range(self.conv_num)]\n inp_filters = 3\n out_size = 32\n for i, key in enumerate(conv_layer_names):\n out_filters = desc[key]['filters']\n kernel_size = desc[key]['kernel_size']\n padding = (kernel_size - 1) // 2\n self.conv_layers[i] = nn.Conv2d(inp_filters, out_filters, padding=padding, kernel_size=kernel_size)\n if 'bn' in desc[key].keys():\n if desc[key]['bn']:\n self.bn_layers[i] = nn.BatchNorm2d(out_filters)\n if 'relu' in desc[key].keys():\n if desc[key]['relu']:\n self.relu_layers[i] = nn.ReLU(inplace=False)\n self.pool_layers[i] = nn.MaxPool2d(2, stride=2)\n inp_filters = out_filters\n out_size = out_size // 2\n fc_inp_size = inp_filters * out_size * out_size\n fc_out_size = desc['fully_connect']['output_unit']\n self.fc0 = nn.Linear(fc_inp_size, fc_out_size)\n self.fc0_relu = nn.ReLU(inplace=True)\n fc_inp_size = fc_out_size\n fc_out_size = 10\n self.fc1 = nn.Linear(fc_inp_size, fc_out_size)\n\n def forward(self, x):\n \"\"\"Forward.\"\"\"\n for i in range(self.conv_num):\n x = self.conv_layers[i](x)\n if self.bn_layers[i] is not None:\n x = self.bn_layers[i](x)\n if self.relu_layers[i] is not None:\n x = self.relu_layers[i](x)\n x = self.pool_layers[i](x)\n x = self.fc0(x.view(x.size(0), -1))\n x = self.fc0_relu(x)\n x = self.fc1(x)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/search_space/networks/pytorch/customs/simplecnn.py", "source_repo": "Huawei-Ascend/modelzoo", "split": "test", "star_events_count": 1} {"blob_id": "e7259d1d17fd980f647f2796b5223949daf4e8c5", "bodies": ["if L <= 0:\n raise ValueError('Input L parameter should be strct. positive.')\nif Nit is not None:\n if Nit <= 0:\n raise ValueError('Input number of iteration is non-positive.')\n if Nit > 1000000.0:\n raise ValueError('Input number of iterations is really high.')\nif init is not None:\n if init.shape != shape:\n raise ValueError('Input init shape and shape parameter do not match.')\nelse:\n np.random.seed(1)\n init = np.random.randn(*shape)\nif not isinstance(verbose, bool):\n raise ValueError('Input verbose parameter is not boolean.')\n_logger.info('Setting up new FISTA optimizer.')\nself.f = f\nself.df = df\nself.g = g\nself.pg = pg\nself.L = L\nself.shape = shape\nself.Nit = Nit\nself.init = init\nself.verbose = verbose\nself.Nit_max = 1000 if self.Nit is None else self.Nit\nself.tau = 0.99 / self.L\nself.E = np.zeros(self.Nit_max)\nself.lim = 0.0001", "if np.allclose(self.E[n - 2], 0):\n return None\nelse:\n return np.abs(self.E[n - 1] - self.E[n - 2]) / (self.E[n - 2] * self.tau)", "if self.Nit is not None:\n return n < self.Nit\nelse:\n if n < 2:\n return True\n if n >= self.Nit_max:\n return False\n else:\n critera = self.StopCritera(n)\n if critera is None:\n if self.verbose:\n print('Iterations stopped as the functional is allclose to 0.')\n return False\n else:\n return critera > self.lim", "_logger.info('Starting FISTA optimization.')\nstart = time.time()\nX0 = self.init\nn = 0\ntheta = 1\nXm1 = X0\nXy = X0\nwhile self.StopTest(n):\n if self.verbose:\n if n >= 2:\n critera = self.StopCritera(n)\n print('n: {}, f + g: {:.3e}, critera: {:.5f} (goal: {:.1e})'.format(n, self.E[n - 1], 0 if critera is None else critera, self.lim))\n else:\n print('n: {}'.format(n))\n X = Xy - self.tau * self.df(Xy)\n X = self.pg(X)\n thetap1 = 0.5 * (1 + math.sqrt(1 + 4 * theta ** 2))\n Xy = X + (theta - 1) / thetap1 * (X - Xm1)\n theta = thetap1\n Xm1 = X\n self.E[n] = self.f(X) + self.g(X)\n n = n + 1\nself.E = self.E[:n]\nInfoOut = {'E': self.E, 'time': time.time() - start}\n_logger.info('FISTA optimization finished.')\nreturn (X, InfoOut)"], "bodies_text": "<|body_start_0|>\n if L <= 0:\n raise ValueError('Input L parameter should be strct. positive.')\n if Nit is not None:\n if Nit <= 0:\n raise ValueError('Input number of iteration is non-positive.')\n if Nit > 1000000.0:\n raise ValueError('Input number of iterations is really high.')\n if init is not None:\n if init.shape != shape:\n raise ValueError('Input init shape and shape parameter do not match.')\n else:\n np.random.seed(1)\n init = np.random.randn(*shape)\n if not isinstance(verbose, bool):\n raise ValueError('Input verbose parameter is not boolean.')\n _logger.info('Setting up new FISTA optimizer.')\n self.f = f\n self.df = df\n self.g = g\n self.pg = pg\n self.L = L\n self.shape = shape\n self.Nit = Nit\n self.init = init\n self.verbose = verbose\n self.Nit_max = 1000 if self.Nit is None else self.Nit\n self.tau = 0.99 / self.L\n self.E = np.zeros(self.Nit_max)\n self.lim = 0.0001\n<|end_body_0|>\n\n<|body_start_1|>\n if np.allclose(self.E[n - 2], 0):\n return None\n else:\n return np.abs(self.E[n - 1] - self.E[n - 2]) / (self.E[n - 2] * self.tau)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.Nit is not None:\n return n < self.Nit\n else:\n if n < 2:\n return True\n if n >= self.Nit_max:\n return False\n else:\n critera = self.StopCritera(n)\n if critera is None:\n if self.verbose:\n print('Iterations stopped as the functional is allclose to 0.')\n return False\n else:\n return critera > self.lim\n<|end_body_2|>\n\n<|body_start_3|>\n _logger.info('Starting FISTA optimization.')\n start = time.time()\n X0 = self.init\n n = 0\n theta = 1\n Xm1 = X0\n Xy = X0\n while self.StopTest(n):\n if self.verbose:\n if n >= 2:\n critera = self.StopCritera(n)\n print('n: {}, f + g: {:.3e}, critera: {:.5f} (goal: {:.1e})'.format(n, self.E[n - 1], 0 if critera is None else critera, self.lim))\n else:\n print('n: {}'.format(n))\n X = Xy - self.tau * self.df(Xy)\n X = self.pg(X)\n thetap1 = 0.5 * (1 + math.sqrt(1 + 4 * theta ** 2))\n Xy = X + (theta - 1) / thetap1 * (X - Xm1)\n theta = thetap1\n Xm1 = X\n self.E[n] = self.f(X) + self.g(X)\n n = n + 1\n self.E = self.E[:n]\n InfoOut = {'E': self.E, 'time': time.time() - start}\n _logger.info('FISTA optimization finished.')\n return (X, InfoOut)\n<|end_body_3|>\n", "class_docstring": "Fast Iterative Shrinkage-Thresholding Algorithm implementation. Attributes ---------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True. Nit_max: int Maximum number of iterations. tau: float Descent", "class_name": "FISTA", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FISTA:\n \"\"\"Fast Iterative Shrinkage-Thresholding Algorithm implementation. Attributes ---------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True. Nit_max: int Maximum number of iterations. tau: float Descent\"\"\"\n\n def __init__(self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True):\n \"\"\"Initialization function for FISTA. Arguments --------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True.\"\"\"\n <|body_0|>\n\n def StopCritera(self, n):\n \"\"\"This function computes a critera that informs about the algorithm convergence at step n. Arguments --------- n: int Current step Returns ------- float Value of the critera.\"\"\"\n <|body_1|>\n\n def StopTest(self, n):\n \"\"\"This function choose if iterations should be stopped at step n. If Nit is not None, it returns True as long as n is smaller than Nit. If Nit is None, it returns True as long as the functional is evolving fast. Arguments --------- n: int Current step. Returns ------- bool Should the iterations go on ?\"\"\"\n <|body_2|>\n\n def execute(self):\n \"\"\"Method that executes the FISTA algorithm. Returns ------- numpy array The optimum of the optimization problem. dict Extra informations about convergence. Note ---- Infos in output dictionary: * :code:`E`: Evolution of the functional along the iterations. * :code:`time`: Execution time.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if L <= 0:\n raise ValueError('Input L parameter should be strct. positive.')\n if Nit is not None:\n if Nit <= 0:\n raise ValueError('Input number of iteration is non-positive.')\n if Nit > 1000000.0:\n raise ValueError('Input number of iterations is really high.')\n if init is not None:\n if init.shape != shape:\n raise ValueError('Input init shape and shape parameter do not match.')\n else:\n np.random.seed(1)\n init = np.random.randn(*shape)\n if not isinstance(verbose, bool):\n raise ValueError('Input verbose parameter is not boolean.')\n _logger.info('Setting up new FISTA optimizer.')\n self.f = f\n self.df = df\n self.g = g\n self.pg = pg\n self.L = L\n self.shape = shape\n self.Nit = Nit\n self.init = init\n self.verbose = verbose\n self.Nit_max = 1000 if self.Nit is None else self.Nit\n self.tau = 0.99 / self.L\n self.E = np.zeros(self.Nit_max)\n self.lim = 0.0001\n<|end_body_0|>\n\n<|body_start_1|>\n if np.allclose(self.E[n - 2], 0):\n return None\n else:\n return np.abs(self.E[n - 1] - self.E[n - 2]) / (self.E[n - 2] * self.tau)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.Nit is not None:\n return n < self.Nit\n else:\n if n < 2:\n return True\n if n >= self.Nit_max:\n return False\n else:\n critera = self.StopCritera(n)\n if critera is None:\n if self.verbose:\n print('Iterations stopped as the functional is allclose to 0.')\n return False\n else:\n return critera > self.lim\n<|end_body_2|>\n\n<|body_start_3|>\n _logger.info('Starting FISTA optimization.')\n start = time.time()\n X0 = self.init\n n = 0\n theta = 1\n Xm1 = X0\n Xy = X0\n while self.StopTest(n):\n if self.verbose:\n if n >= 2:\n critera = self.StopCritera(n)\n print('n: {}, f + g: {:.3e}, critera: {:.5f} (goal: {:.1e})'.format(n, self.E[n - 1], 0 if critera is None else critera, self.lim))\n else:\n print('n: {}'.format(n))\n X = Xy - self.tau * self.df(Xy)\n X = self.pg(X)\n thetap1 = 0.5 * (1 + math.sqrt(1 + 4 * theta ** 2))\n Xy = X + (theta - 1) / thetap1 * (X - Xm1)\n theta = thetap1\n Xm1 = X\n self.E[n] = self.f(X) + self.g(X)\n n = n + 1\n self.E = self.E[:n]\n InfoOut = {'E': self.E, 'time': time.time() - start}\n _logger.info('FISTA optimization finished.')\n return (X, InfoOut)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000299", "length_bytes": 7697, "license_type": "permissive", "methods": [{"docstring": "Initialization function for FISTA. Arguments --------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True.", "name": "__init__", "signature": "def __init__(self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True)"}, {"docstring": "This function computes a critera that informs about the algorithm convergence at step n. Arguments --------- n: int Current step Returns ------- float Value of the critera.", "name": "StopCritera", "signature": "def StopCritera(self, n)"}, {"docstring": "This function choose if iterations should be stopped at step n. If Nit is not None, it returns True as long as n is smaller than Nit. If Nit is None, it returns True as long as the functional is evolving fast. Arguments --------- n: int Current step. Returns ------- bool Should the iterations go on ?", "name": "StopTest", "signature": "def StopTest(self, n)"}, {"docstring": "Method that executes the FISTA algorithm. Returns ------- numpy array The optimum of the optimization problem. dict Extra informations about convergence. Note ---- Infos in output dictionary: * :code:`E`: Evolution of the functional along the iterations. * :code:`time`: Execution time.", "name": "execute", "signature": "def execute(self)"}], "n_methods": 4, "prompt": "Implement the Python class `FISTA` described below.\n\nClass description:\nFast Iterative Shrinkage-Thresholding Algorithm implementation. Attributes ---------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True. Nit_max: int Maximum number of iterations. tau: float Descent\n\nMethod signatures and docstrings:\n- def __init__(self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True): Initialization function for FISTA. Arguments --------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True.\n- def StopCritera(self, n): This function computes a critera that informs about the algorithm convergence at step n. Arguments --------- n: int Current step Returns ------- float Value of the critera.\n- def StopTest(self, n): This function choose if iterations should be stopped at step n. If Nit is not None, it returns True as long as n is smaller than Nit. If Nit is None, it returns True as long as the functional is evolving fast. Arguments --------- n: int Current step. Returns ------- bool Should the iterations go on ?\n- def execute(self): Method that executes the FISTA algorithm. Returns ------- numpy array The optimum of the optimization problem. dict Extra informations about convergence. Note ---- Infos in output dictionary: * :code:`E`: Evolution of the functional along the iterations. * :code:`time`: Execution time.", "prompted_full_text": "Implement the Python class `FISTA` described below.\n\nClass description:\nFast Iterative Shrinkage-Thresholding Algorithm implementation. Attributes ---------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True. Nit_max: int Maximum number of iterations. tau: float Descent\n\nMethod signatures and docstrings:\n- def __init__(self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True): Initialization function for FISTA. Arguments --------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True.\n- def StopCritera(self, n): This function computes a critera that informs about the algorithm convergence at step n. Arguments --------- n: int Current step Returns ------- float Value of the critera.\n- def StopTest(self, n): This function choose if iterations should be stopped at step n. If Nit is not None, it returns True as long as n is smaller than Nit. If Nit is None, it returns True as long as the functional is evolving fast. Arguments --------- n: int Current step. Returns ------- bool Should the iterations go on ?\n- def execute(self): Method that executes the FISTA algorithm. Returns ------- numpy array The optimum of the optimization problem. dict Extra informations about convergence. Note ---- Infos in output dictionary: * :code:`E`: Evolution of the functional along the iterations. * :code:`time`: Execution time.\n\n<|skeleton|>\nclass FISTA:\n \"\"\"Fast Iterative Shrinkage-Thresholding Algorithm implementation. Attributes ---------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True. Nit_max: int Maximum number of iterations. tau: float Descent\"\"\"\n\n def __init__(self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True):\n \"\"\"Initialization function for FISTA. Arguments --------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True.\"\"\"\n <|body_0|>\n\n def StopCritera(self, n):\n \"\"\"This function computes a critera that informs about the algorithm convergence at step n. Arguments --------- n: int Current step Returns ------- float Value of the critera.\"\"\"\n <|body_1|>\n\n def StopTest(self, n):\n \"\"\"This function choose if iterations should be stopped at step n. If Nit is not None, it returns True as long as n is smaller than Nit. If Nit is None, it returns True as long as the functional is evolving fast. Arguments --------- n: int Current step. Returns ------- bool Should the iterations go on ?\"\"\"\n <|body_2|>\n\n def execute(self):\n \"\"\"Method that executes the FISTA algorithm. Returns ------- numpy array The optimum of the optimization problem. dict Extra informations about convergence. Note ---- Infos in output dictionary: * :code:`E`: Evolution of the functional along the iterations. * :code:`time`: Execution time.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if L <= 0:\n raise ValueError('Input L parameter should be strct. positive.')\n if Nit is not None:\n if Nit <= 0:\n raise ValueError('Input number of iteration is non-positive.')\n if Nit > 1000000.0:\n raise ValueError('Input number of iterations is really high.')\n if init is not None:\n if init.shape != shape:\n raise ValueError('Input init shape and shape parameter do not match.')\n else:\n np.random.seed(1)\n init = np.random.randn(*shape)\n if not isinstance(verbose, bool):\n raise ValueError('Input verbose parameter is not boolean.')\n _logger.info('Setting up new FISTA optimizer.')\n self.f = f\n self.df = df\n self.g = g\n self.pg = pg\n self.L = L\n self.shape = shape\n self.Nit = Nit\n self.init = init\n self.verbose = verbose\n self.Nit_max = 1000 if self.Nit is None else self.Nit\n self.tau = 0.99 / self.L\n self.E = np.zeros(self.Nit_max)\n self.lim = 0.0001\n<|end_body_0|>\n\n<|body_start_1|>\n if np.allclose(self.E[n - 2], 0):\n return None\n else:\n return np.abs(self.E[n - 1] - self.E[n - 2]) / (self.E[n - 2] * self.tau)\n<|end_body_1|>\n\n<|body_start_2|>\n if self.Nit is not None:\n return n < self.Nit\n else:\n if n < 2:\n return True\n if n >= self.Nit_max:\n return False\n else:\n critera = self.StopCritera(n)\n if critera is None:\n if self.verbose:\n print('Iterations stopped as the functional is allclose to 0.')\n return False\n else:\n return critera > self.lim\n<|end_body_2|>\n\n<|body_start_3|>\n _logger.info('Starting FISTA optimization.')\n start = time.time()\n X0 = self.init\n n = 0\n theta = 1\n Xm1 = X0\n Xy = X0\n while self.StopTest(n):\n if self.verbose:\n if n >= 2:\n critera = self.StopCritera(n)\n print('n: {}, f + g: {:.3e}, critera: {:.5f} (goal: {:.1e})'.format(n, self.E[n - 1], 0 if critera is None else critera, self.lim))\n else:\n print('n: {}'.format(n))\n X = Xy - self.tau * self.df(Xy)\n X = self.pg(X)\n thetap1 = 0.5 * (1 + math.sqrt(1 + 4 * theta ** 2))\n Xy = X + (theta - 1) / thetap1 * (X - Xm1)\n theta = thetap1\n Xm1 = X\n self.E[n] = self.f(X) + self.g(X)\n n = n + 1\n self.E = self.E[:n]\n InfoOut = {'E': self.E, 'time': time.time() - start}\n _logger.info('FISTA optimization finished.')\n return (X, InfoOut)\n<|end_body_3|>\n", "revision_id": "7914008374d0eb32362c447052de1af2b1ea9fa9", "skeleton": "<|skeleton|>\nclass FISTA:\n \"\"\"Fast Iterative Shrinkage-Thresholding Algorithm implementation. Attributes ---------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True. Nit_max: int Maximum number of iterations. tau: float Descent\"\"\"\n\n def __init__(self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True):\n \"\"\"Initialization function for FISTA. Arguments --------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True.\"\"\"\n <|body_0|>\n\n def StopCritera(self, n):\n \"\"\"This function computes a critera that informs about the algorithm convergence at step n. Arguments --------- n: int Current step Returns ------- float Value of the critera.\"\"\"\n <|body_1|>\n\n def StopTest(self, n):\n \"\"\"This function choose if iterations should be stopped at step n. If Nit is not None, it returns True as long as n is smaller than Nit. If Nit is None, it returns True as long as the functional is evolving fast. Arguments --------- n: int Current step. Returns ------- bool Should the iterations go on ?\"\"\"\n <|body_2|>\n\n def execute(self):\n \"\"\"Method that executes the FISTA algorithm. Returns ------- numpy array The optimum of the optimization problem. dict Extra informations about convergence. Note ---- Infos in output dictionary: * :code:`E`: Evolution of the functional along the iterations. * :code:`time`: Execution time.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FISTA:\n \"\"\"Fast Iterative Shrinkage-Thresholding Algorithm implementation. Attributes ---------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True. Nit_max: int Maximum number of iterations. tau: float Descent\"\"\"\n\n def __init__(self, f, df, L, g, pg, shape, Nit=None, init=None, verbose=True):\n \"\"\"Initialization function for FISTA. Arguments --------- f: function :math:`C^{1,1}` convex function. df: function derivative function of f. L: float Lipshitz contant of f. g: function Non-smooth function. pg: function g poximal operator. shape: tuple The data shape. Nit: None, int Number of iteration. If None, the iterations will stop as soon as the functional no longer evolve. Default is None. init: numpy array Init point which shape is the same as the data. If None, a random initailization is drawn. Default is None. verbose: bool If True, process informations are sent to the output. Default is True.\"\"\"\n if L <= 0:\n raise ValueError('Input L parameter should be strct. positive.')\n if Nit is not None:\n if Nit <= 0:\n raise ValueError('Input number of iteration is non-positive.')\n if Nit > 1000000.0:\n raise ValueError('Input number of iterations is really high.')\n if init is not None:\n if init.shape != shape:\n raise ValueError('Input init shape and shape parameter do not match.')\n else:\n np.random.seed(1)\n init = np.random.randn(*shape)\n if not isinstance(verbose, bool):\n raise ValueError('Input verbose parameter is not boolean.')\n _logger.info('Setting up new FISTA optimizer.')\n self.f = f\n self.df = df\n self.g = g\n self.pg = pg\n self.L = L\n self.shape = shape\n self.Nit = Nit\n self.init = init\n self.verbose = verbose\n self.Nit_max = 1000 if self.Nit is None else self.Nit\n self.tau = 0.99 / self.L\n self.E = np.zeros(self.Nit_max)\n self.lim = 0.0001\n\n def StopCritera(self, n):\n \"\"\"This function computes a critera that informs about the algorithm convergence at step n. Arguments --------- n: int Current step Returns ------- float Value of the critera.\"\"\"\n if np.allclose(self.E[n - 2], 0):\n return None\n else:\n return np.abs(self.E[n - 1] - self.E[n - 2]) / (self.E[n - 2] * self.tau)\n\n def StopTest(self, n):\n \"\"\"This function choose if iterations should be stopped at step n. If Nit is not None, it returns True as long as n is smaller than Nit. If Nit is None, it returns True as long as the functional is evolving fast. Arguments --------- n: int Current step. Returns ------- bool Should the iterations go on ?\"\"\"\n if self.Nit is not None:\n return n < self.Nit\n else:\n if n < 2:\n return True\n if n >= self.Nit_max:\n return False\n else:\n critera = self.StopCritera(n)\n if critera is None:\n if self.verbose:\n print('Iterations stopped as the functional is allclose to 0.')\n return False\n else:\n return critera > self.lim\n\n def execute(self):\n \"\"\"Method that executes the FISTA algorithm. Returns ------- numpy array The optimum of the optimization problem. dict Extra informations about convergence. Note ---- Infos in output dictionary: * :code:`E`: Evolution of the functional along the iterations. * :code:`time`: Execution time.\"\"\"\n _logger.info('Starting FISTA optimization.')\n start = time.time()\n X0 = self.init\n n = 0\n theta = 1\n Xm1 = X0\n Xy = X0\n while self.StopTest(n):\n if self.verbose:\n if n >= 2:\n critera = self.StopCritera(n)\n print('n: {}, f + g: {:.3e}, critera: {:.5f} (goal: {:.1e})'.format(n, self.E[n - 1], 0 if critera is None else critera, self.lim))\n else:\n print('n: {}'.format(n))\n X = Xy - self.tau * self.df(Xy)\n X = self.pg(X)\n thetap1 = 0.5 * (1 + math.sqrt(1 + 4 * theta ** 2))\n Xy = X + (theta - 1) / thetap1 * (X - Xm1)\n theta = thetap1\n Xm1 = X\n self.E[n] = self.f(X) + self.g(X)\n n = n + 1\n self.E = self.E[:n]\n InfoOut = {'E': self.E, 'time': time.time() - start}\n _logger.info('FISTA optimization finished.')\n return (X, InfoOut)\n", "source": "the_stack_v2_python_sparse", "source_path": "inpystem/tools/FISTA.py", "source_repo": "etienne-monier/inpystem", "split": "test", "star_events_count": 4} {"blob_id": "f73c8be6fd5be04eb80d06ca71f8149e70526fdc", "bodies": ["y = np.asarray(y)\nif np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\nX, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=1)\nsuper(BaseTreeQuantileRegressor, self).fit(X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted)\nself.y_train_ = y\nself.y_train_leaves_ = self.tree_.apply(X)\nreturn self", "X = check_array(X, dtype=np.float32, accept_sparse='csc')\nif quantile is None:\n return super(BaseTreeQuantileRegressor, self).predict(X, check_input=check_input)\nB = u.shape[0]\nr_cos, r_sin = (np.zeros((X.shape[0], B)), np.zeros((X.shape[0], B)))\nX_leaves = self.apply(X)\nunique_leaves = np.unique(X_leaves)\nfor leaf in unique_leaves:\n y = self.y_train_[self.y_train_leaves_ == leaf]\n r_cos[X_leaves == leaf, :] = np.mean(np.cos(y.dot(uv.T)), axis=0)\n r_sin[X_leaves == leaf, :] = np.mean(np.sin(y.dot(uv.T)), axis=0)\nreturn (r_cos, r_sin)"], "bodies_text": "<|body_start_0|>\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=1)\n super(BaseTreeQuantileRegressor, self).fit(X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n if quantile is None:\n return super(BaseTreeQuantileRegressor, self).predict(X, check_input=check_input)\n B = u.shape[0]\n r_cos, r_sin = (np.zeros((X.shape[0], B)), np.zeros((X.shape[0], B)))\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n for leaf in unique_leaves:\n y = self.y_train_[self.y_train_leaves_ == leaf]\n r_cos[X_leaves == leaf, :] = np.mean(np.cos(y.dot(uv.T)), axis=0)\n r_sin[X_leaves == leaf, :] = np.mean(np.sin(y.dot(uv.T)), axis=0)\n return (r_cos, r_sin)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BaseTreeQuantileRegressor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BaseTreeQuantileRegressor:\n\n def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None):\n \"\"\"Child of BaseDecisionTree (sklearn), which use a single DecisionTree to do the same kind of Quantile things. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each nod\"\"\"\n <|body_0|>\n\n def predict(self, X, u, check_input=False):\n \"\"\"Predict regression value for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. quantile : int, optional Value ranging from 0 to 100. By default, the mean is returned. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] If quantile is set to None, then return E(Y | X). Else return y such that F(Y=y | x) = quantile.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=1)\n super(BaseTreeQuantileRegressor, self).fit(X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n if quantile is None:\n return super(BaseTreeQuantileRegressor, self).predict(X, check_input=check_input)\n B = u.shape[0]\n r_cos, r_sin = (np.zeros((X.shape[0], B)), np.zeros((X.shape[0], B)))\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n for leaf in unique_leaves:\n y = self.y_train_[self.y_train_leaves_ == leaf]\n r_cos[X_leaves == leaf, :] = np.mean(np.cos(y.dot(uv.T)), axis=0)\n r_sin[X_leaves == leaf, :] = np.mean(np.sin(y.dot(uv.T)), axis=0)\n return (r_cos, r_sin)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000300", "length_bytes": 12551, "license_type": "permissive", "methods": [{"docstring": "Child of BaseDecisionTree (sklearn), which use a single DecisionTree to do the same kind of Quantile things. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each nod", "name": "fit", "signature": "def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None)"}, {"docstring": "Predict regression value for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. quantile : int, optional Value ranging from 0 to 100. By default, the mean is returned. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] If quantile is set to None, then return E(Y | X). Else return y such that F(Y=y | x) = quantile.", "name": "predict", "signature": "def predict(self, X, u, check_input=False)"}], "n_methods": 2, "prompt": "Implement the Python class `BaseTreeQuantileRegressor` described below.\n\nClass description:\nImplement the BaseTreeQuantileRegressor class.\n\nMethod signatures and docstrings:\n- def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None): Child of BaseDecisionTree (sklearn), which use a single DecisionTree to do the same kind of Quantile things. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each nod\n- def predict(self, X, u, check_input=False): Predict regression value for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. quantile : int, optional Value ranging from 0 to 100. By default, the mean is returned. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] If quantile is set to None, then return E(Y | X). Else return y such that F(Y=y | x) = quantile.", "prompted_full_text": "Implement the Python class `BaseTreeQuantileRegressor` described below.\n\nClass description:\nImplement the BaseTreeQuantileRegressor class.\n\nMethod signatures and docstrings:\n- def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None): Child of BaseDecisionTree (sklearn), which use a single DecisionTree to do the same kind of Quantile things. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each nod\n- def predict(self, X, u, check_input=False): Predict regression value for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. quantile : int, optional Value ranging from 0 to 100. By default, the mean is returned. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] If quantile is set to None, then return E(Y | X). Else return y such that F(Y=y | x) = quantile.\n\n<|skeleton|>\nclass BaseTreeQuantileRegressor:\n\n def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None):\n \"\"\"Child of BaseDecisionTree (sklearn), which use a single DecisionTree to do the same kind of Quantile things. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each nod\"\"\"\n <|body_0|>\n\n def predict(self, X, u, check_input=False):\n \"\"\"Predict regression value for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. quantile : int, optional Value ranging from 0 to 100. By default, the mean is returned. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] If quantile is set to None, then return E(Y | X). Else return y such that F(Y=y | x) = quantile.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=1)\n super(BaseTreeQuantileRegressor, self).fit(X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n<|end_body_0|>\n\n<|body_start_1|>\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n if quantile is None:\n return super(BaseTreeQuantileRegressor, self).predict(X, check_input=check_input)\n B = u.shape[0]\n r_cos, r_sin = (np.zeros((X.shape[0], B)), np.zeros((X.shape[0], B)))\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n for leaf in unique_leaves:\n y = self.y_train_[self.y_train_leaves_ == leaf]\n r_cos[X_leaves == leaf, :] = np.mean(np.cos(y.dot(uv.T)), axis=0)\n r_sin[X_leaves == leaf, :] = np.mean(np.sin(y.dot(uv.T)), axis=0)\n return (r_cos, r_sin)\n<|end_body_1|>\n", "revision_id": "a87db29eb786a48a48c0660fe7b2e365aa6b51a8", "skeleton": "<|skeleton|>\nclass BaseTreeQuantileRegressor:\n\n def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None):\n \"\"\"Child of BaseDecisionTree (sklearn), which use a single DecisionTree to do the same kind of Quantile things. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each nod\"\"\"\n <|body_0|>\n\n def predict(self, X, u, check_input=False):\n \"\"\"Predict regression value for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. quantile : int, optional Value ranging from 0 to 100. By default, the mean is returned. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] If quantile is set to None, then return E(Y | X). Else return y such that F(Y=y | x) = quantile.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BaseTreeQuantileRegressor:\n def fit(self, X, y, sample_weight=None, check_input=True, X_idx_sorted=None):\n \"\"\"Child of BaseDecisionTree (sklearn), which use a single DecisionTree to do the same kind of Quantile things. Parameters ---------- X : array-like or sparse matrix, shape = [n_samples, n_features] The training input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csc_matrix``. y : array-like, shape = [n_samples] or [n_samples, n_outputs] The target values (class labels) as integers or strings. sample_weight : array-like, shape = [n_samples] or None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each nod\"\"\"\n y = np.asarray(y)\n if np.ndim(y) == 2 and y.shape[1] == 1:\n y = np.ravel(y)\n X, y = check_X_y(X, y, accept_sparse='csc', dtype=np.float32, multi_output=1)\n super(BaseTreeQuantileRegressor, self).fit(X, y, sample_weight=sample_weight, check_input=check_input, X_idx_sorted=X_idx_sorted)\n self.y_train_ = y\n self.y_train_leaves_ = self.tree_.apply(X)\n return self\n\n def predict(self, X, u, check_input=False):\n \"\"\"Predict regression value for X. Parameters ---------- X : array-like or sparse matrix of shape = [n_samples, n_features] The input samples. Internally, it will be converted to ``dtype=np.float32`` and if a sparse matrix is provided to a sparse ``csr_matrix``. quantile : int, optional Value ranging from 0 to 100. By default, the mean is returned. check_input : boolean, (default=True) Allow to bypass several input checking. Don't use this parameter unless you know what you do. Returns ------- y : array of shape = [n_samples] If quantile is set to None, then return E(Y | X). Else return y such that F(Y=y | x) = quantile.\"\"\"\n X = check_array(X, dtype=np.float32, accept_sparse='csc')\n if quantile is None:\n return super(BaseTreeQuantileRegressor, self).predict(X, check_input=check_input)\n B = u.shape[0]\n r_cos, r_sin = (np.zeros((X.shape[0], B)), np.zeros((X.shape[0], B)))\n X_leaves = self.apply(X)\n unique_leaves = np.unique(X_leaves)\n for leaf in unique_leaves:\n y = self.y_train_[self.y_train_leaves_ == leaf]\n r_cos[X_leaves == leaf, :] = np.mean(np.cos(y.dot(uv.T)), axis=0)\n r_sin[X_leaves == leaf, :] = np.mean(np.sin(y.dot(uv.T)), axis=0)\n return (r_cos, r_sin)\n", "source": "the_stack_v2_python_sparse", "source_path": "test_func/_QRF.py", "source_repo": "RunzheStat/TestMDP", "split": "test", "star_events_count": 13} {"blob_id": "5a3dc80453bda0bfa9f74dbb92afebbec31fb5f7", "bodies": ["if lang is not None and (not isinstance(lang, AlgorandLanguages)):\n raise TypeError('Language is not an enumerative of AlgorandLanguages')\nsuper().__init__(lang.value if lang is not None else lang, Bip39WordsListFinder, Bip39WordsListGetter)", "mnemonic_obj = AlgorandMnemonic.FromString(mnemonic) if isinstance(mnemonic, str) else mnemonic\nif mnemonic_obj.WordsCount() not in AlgorandMnemonicConst.MNEMONIC_WORD_NUM:\n raise ValueError(f'Mnemonic words count is not valid ({mnemonic_obj.WordsCount()})')\nwords = mnemonic_obj.ToList()\nwords_list, _ = self._FindLanguage(mnemonic_obj)\nword_indexes = [words_list.GetWordIdx(w) for w in words]\nentropy_list = AlgorandMnemonicUtils.ConvertBits(word_indexes[:-1], 11, 8)\nassert entropy_list is not None\nentropy_bytes = BytesUtils.FromList(entropy_list)[:-1]\nself.__ValidateChecksum(entropy_bytes, word_indexes[-1], words_list)\nreturn entropy_bytes", "chksum_word_idx = AlgorandMnemonicUtils.ComputeChecksumWordIndex(entropy_bytes)\nif chksum_word_idx != chksum_word_idx_exp:\n raise MnemonicChecksumError(f'Invalid checksum (expected {words_list.GetWordAtIdx(chksum_word_idx)}, got {words_list.GetWordAtIdx(chksum_word_idx_exp)})')"], "bodies_text": "<|body_start_0|>\n if lang is not None and (not isinstance(lang, AlgorandLanguages)):\n raise TypeError('Language is not an enumerative of AlgorandLanguages')\n super().__init__(lang.value if lang is not None else lang, Bip39WordsListFinder, Bip39WordsListGetter)\n<|end_body_0|>\n\n<|body_start_1|>\n mnemonic_obj = AlgorandMnemonic.FromString(mnemonic) if isinstance(mnemonic, str) else mnemonic\n if mnemonic_obj.WordsCount() not in AlgorandMnemonicConst.MNEMONIC_WORD_NUM:\n raise ValueError(f'Mnemonic words count is not valid ({mnemonic_obj.WordsCount()})')\n words = mnemonic_obj.ToList()\n words_list, _ = self._FindLanguage(mnemonic_obj)\n word_indexes = [words_list.GetWordIdx(w) for w in words]\n entropy_list = AlgorandMnemonicUtils.ConvertBits(word_indexes[:-1], 11, 8)\n assert entropy_list is not None\n entropy_bytes = BytesUtils.FromList(entropy_list)[:-1]\n self.__ValidateChecksum(entropy_bytes, word_indexes[-1], words_list)\n return entropy_bytes\n<|end_body_1|>\n\n<|body_start_2|>\n chksum_word_idx = AlgorandMnemonicUtils.ComputeChecksumWordIndex(entropy_bytes)\n if chksum_word_idx != chksum_word_idx_exp:\n raise MnemonicChecksumError(f'Invalid checksum (expected {words_list.GetWordAtIdx(chksum_word_idx)}, got {words_list.GetWordAtIdx(chksum_word_idx_exp)})')\n<|end_body_2|>\n", "class_docstring": "Algorand mnemonic decoder class. It decodes a mnemonic phrase to bytes.", "class_name": "AlgorandMnemonicDecoder", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AlgorandMnemonicDecoder:\n \"\"\"Algorand mnemonic decoder class. It decodes a mnemonic phrase to bytes.\"\"\"\n\n def __init__(self, lang: Optional[AlgorandLanguages]=AlgorandLanguages.ENGLISH) -> None:\n \"\"\"Construct class. Language is set to English by default because Algorand mnemonic only support one language, so it's useless (and slower) to automatically detect the language. Args: lang (AlgorandLanguages, optional): Language, None for automatic detection Raises: TypeError: If the language is not a AlgorandLanguages enum ValueError: If loaded words list is not valid\"\"\"\n <|body_0|>\n\n def Decode(self, mnemonic: Union[str, Mnemonic]) -> bytes:\n \"\"\"Decode a mnemonic phrase to bytes (no checksum). Args: mnemonic (str or Mnemonic object): Mnemonic Returns: bytes: Decoded bytes Raises: MnemonicChecksumError: If checksum is not valid ValueError: If mnemonic is not valid\"\"\"\n <|body_1|>\n\n def __ValidateChecksum(entropy_bytes: bytes, chksum_word_idx_exp: int, words_list: MnemonicWordsList) -> None:\n \"\"\"Validate a mnemonic checksum. Args: entropy_bytes (list) : Entropy bytes chksum_word_idx_exp (int) : Expected checksum word index words_list (MnemonicWordsList): Words list Raises: MnemonicChecksumError: If checksum is not valid\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if lang is not None and (not isinstance(lang, AlgorandLanguages)):\n raise TypeError('Language is not an enumerative of AlgorandLanguages')\n super().__init__(lang.value if lang is not None else lang, Bip39WordsListFinder, Bip39WordsListGetter)\n<|end_body_0|>\n\n<|body_start_1|>\n mnemonic_obj = AlgorandMnemonic.FromString(mnemonic) if isinstance(mnemonic, str) else mnemonic\n if mnemonic_obj.WordsCount() not in AlgorandMnemonicConst.MNEMONIC_WORD_NUM:\n raise ValueError(f'Mnemonic words count is not valid ({mnemonic_obj.WordsCount()})')\n words = mnemonic_obj.ToList()\n words_list, _ = self._FindLanguage(mnemonic_obj)\n word_indexes = [words_list.GetWordIdx(w) for w in words]\n entropy_list = AlgorandMnemonicUtils.ConvertBits(word_indexes[:-1], 11, 8)\n assert entropy_list is not None\n entropy_bytes = BytesUtils.FromList(entropy_list)[:-1]\n self.__ValidateChecksum(entropy_bytes, word_indexes[-1], words_list)\n return entropy_bytes\n<|end_body_1|>\n\n<|body_start_2|>\n chksum_word_idx = AlgorandMnemonicUtils.ComputeChecksumWordIndex(entropy_bytes)\n if chksum_word_idx != chksum_word_idx_exp:\n raise MnemonicChecksumError(f'Invalid checksum (expected {words_list.GetWordAtIdx(chksum_word_idx)}, got {words_list.GetWordAtIdx(chksum_word_idx_exp)})')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000301", "length_bytes": 5112, "license_type": "permissive", "methods": [{"docstring": "Construct class. Language is set to English by default because Algorand mnemonic only support one language, so it's useless (and slower) to automatically detect the language. Args: lang (AlgorandLanguages, optional): Language, None for automatic detection Raises: TypeError: If the language is not a AlgorandLanguages enum ValueError: If loaded words list is not valid", "name": "__init__", "signature": "def __init__(self, lang: Optional[AlgorandLanguages]=AlgorandLanguages.ENGLISH) -> None"}, {"docstring": "Decode a mnemonic phrase to bytes (no checksum). Args: mnemonic (str or Mnemonic object): Mnemonic Returns: bytes: Decoded bytes Raises: MnemonicChecksumError: If checksum is not valid ValueError: If mnemonic is not valid", "name": "Decode", "signature": "def Decode(self, mnemonic: Union[str, Mnemonic]) -> bytes"}, {"docstring": "Validate a mnemonic checksum. Args: entropy_bytes (list) : Entropy bytes chksum_word_idx_exp (int) : Expected checksum word index words_list (MnemonicWordsList): Words list Raises: MnemonicChecksumError: If checksum is not valid", "name": "__ValidateChecksum", "signature": "def __ValidateChecksum(entropy_bytes: bytes, chksum_word_idx_exp: int, words_list: MnemonicWordsList) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_017342", "prompt": "Implement the Python class `AlgorandMnemonicDecoder` described below.\n\nClass description:\nAlgorand mnemonic decoder class. It decodes a mnemonic phrase to bytes.\n\nMethod signatures and docstrings:\n- def __init__(self, lang: Optional[AlgorandLanguages]=AlgorandLanguages.ENGLISH) -> None: Construct class. Language is set to English by default because Algorand mnemonic only support one language, so it's useless (and slower) to automatically detect the language. Args: lang (AlgorandLanguages, optional): Language, None for automatic detection Raises: TypeError: If the language is not a AlgorandLanguages enum ValueError: If loaded words list is not valid\n- def Decode(self, mnemonic: Union[str, Mnemonic]) -> bytes: Decode a mnemonic phrase to bytes (no checksum). Args: mnemonic (str or Mnemonic object): Mnemonic Returns: bytes: Decoded bytes Raises: MnemonicChecksumError: If checksum is not valid ValueError: If mnemonic is not valid\n- def __ValidateChecksum(entropy_bytes: bytes, chksum_word_idx_exp: int, words_list: MnemonicWordsList) -> None: Validate a mnemonic checksum. Args: entropy_bytes (list) : Entropy bytes chksum_word_idx_exp (int) : Expected checksum word index words_list (MnemonicWordsList): Words list Raises: MnemonicChecksumError: If checksum is not valid", "prompted_full_text": "Implement the Python class `AlgorandMnemonicDecoder` described below.\n\nClass description:\nAlgorand mnemonic decoder class. It decodes a mnemonic phrase to bytes.\n\nMethod signatures and docstrings:\n- def __init__(self, lang: Optional[AlgorandLanguages]=AlgorandLanguages.ENGLISH) -> None: Construct class. Language is set to English by default because Algorand mnemonic only support one language, so it's useless (and slower) to automatically detect the language. Args: lang (AlgorandLanguages, optional): Language, None for automatic detection Raises: TypeError: If the language is not a AlgorandLanguages enum ValueError: If loaded words list is not valid\n- def Decode(self, mnemonic: Union[str, Mnemonic]) -> bytes: Decode a mnemonic phrase to bytes (no checksum). Args: mnemonic (str or Mnemonic object): Mnemonic Returns: bytes: Decoded bytes Raises: MnemonicChecksumError: If checksum is not valid ValueError: If mnemonic is not valid\n- def __ValidateChecksum(entropy_bytes: bytes, chksum_word_idx_exp: int, words_list: MnemonicWordsList) -> None: Validate a mnemonic checksum. Args: entropy_bytes (list) : Entropy bytes chksum_word_idx_exp (int) : Expected checksum word index words_list (MnemonicWordsList): Words list Raises: MnemonicChecksumError: If checksum is not valid\n\n<|skeleton|>\nclass AlgorandMnemonicDecoder:\n \"\"\"Algorand mnemonic decoder class. It decodes a mnemonic phrase to bytes.\"\"\"\n\n def __init__(self, lang: Optional[AlgorandLanguages]=AlgorandLanguages.ENGLISH) -> None:\n \"\"\"Construct class. Language is set to English by default because Algorand mnemonic only support one language, so it's useless (and slower) to automatically detect the language. Args: lang (AlgorandLanguages, optional): Language, None for automatic detection Raises: TypeError: If the language is not a AlgorandLanguages enum ValueError: If loaded words list is not valid\"\"\"\n <|body_0|>\n\n def Decode(self, mnemonic: Union[str, Mnemonic]) -> bytes:\n \"\"\"Decode a mnemonic phrase to bytes (no checksum). Args: mnemonic (str or Mnemonic object): Mnemonic Returns: bytes: Decoded bytes Raises: MnemonicChecksumError: If checksum is not valid ValueError: If mnemonic is not valid\"\"\"\n <|body_1|>\n\n def __ValidateChecksum(entropy_bytes: bytes, chksum_word_idx_exp: int, words_list: MnemonicWordsList) -> None:\n \"\"\"Validate a mnemonic checksum. Args: entropy_bytes (list) : Entropy bytes chksum_word_idx_exp (int) : Expected checksum word index words_list (MnemonicWordsList): Words list Raises: MnemonicChecksumError: If checksum is not valid\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if lang is not None and (not isinstance(lang, AlgorandLanguages)):\n raise TypeError('Language is not an enumerative of AlgorandLanguages')\n super().__init__(lang.value if lang is not None else lang, Bip39WordsListFinder, Bip39WordsListGetter)\n<|end_body_0|>\n\n<|body_start_1|>\n mnemonic_obj = AlgorandMnemonic.FromString(mnemonic) if isinstance(mnemonic, str) else mnemonic\n if mnemonic_obj.WordsCount() not in AlgorandMnemonicConst.MNEMONIC_WORD_NUM:\n raise ValueError(f'Mnemonic words count is not valid ({mnemonic_obj.WordsCount()})')\n words = mnemonic_obj.ToList()\n words_list, _ = self._FindLanguage(mnemonic_obj)\n word_indexes = [words_list.GetWordIdx(w) for w in words]\n entropy_list = AlgorandMnemonicUtils.ConvertBits(word_indexes[:-1], 11, 8)\n assert entropy_list is not None\n entropy_bytes = BytesUtils.FromList(entropy_list)[:-1]\n self.__ValidateChecksum(entropy_bytes, word_indexes[-1], words_list)\n return entropy_bytes\n<|end_body_1|>\n\n<|body_start_2|>\n chksum_word_idx = AlgorandMnemonicUtils.ComputeChecksumWordIndex(entropy_bytes)\n if chksum_word_idx != chksum_word_idx_exp:\n raise MnemonicChecksumError(f'Invalid checksum (expected {words_list.GetWordAtIdx(chksum_word_idx)}, got {words_list.GetWordAtIdx(chksum_word_idx_exp)})')\n<|end_body_2|>\n", "revision_id": "d15c75ddd74e4838c396a0d036ef6faf11b06a4b", "skeleton": "<|skeleton|>\nclass AlgorandMnemonicDecoder:\n \"\"\"Algorand mnemonic decoder class. It decodes a mnemonic phrase to bytes.\"\"\"\n\n def __init__(self, lang: Optional[AlgorandLanguages]=AlgorandLanguages.ENGLISH) -> None:\n \"\"\"Construct class. Language is set to English by default because Algorand mnemonic only support one language, so it's useless (and slower) to automatically detect the language. Args: lang (AlgorandLanguages, optional): Language, None for automatic detection Raises: TypeError: If the language is not a AlgorandLanguages enum ValueError: If loaded words list is not valid\"\"\"\n <|body_0|>\n\n def Decode(self, mnemonic: Union[str, Mnemonic]) -> bytes:\n \"\"\"Decode a mnemonic phrase to bytes (no checksum). Args: mnemonic (str or Mnemonic object): Mnemonic Returns: bytes: Decoded bytes Raises: MnemonicChecksumError: If checksum is not valid ValueError: If mnemonic is not valid\"\"\"\n <|body_1|>\n\n def __ValidateChecksum(entropy_bytes: bytes, chksum_word_idx_exp: int, words_list: MnemonicWordsList) -> None:\n \"\"\"Validate a mnemonic checksum. Args: entropy_bytes (list) : Entropy bytes chksum_word_idx_exp (int) : Expected checksum word index words_list (MnemonicWordsList): Words list Raises: MnemonicChecksumError: If checksum is not valid\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AlgorandMnemonicDecoder:\n \"\"\"Algorand mnemonic decoder class. It decodes a mnemonic phrase to bytes.\"\"\"\n\n def __init__(self, lang: Optional[AlgorandLanguages]=AlgorandLanguages.ENGLISH) -> None:\n \"\"\"Construct class. Language is set to English by default because Algorand mnemonic only support one language, so it's useless (and slower) to automatically detect the language. Args: lang (AlgorandLanguages, optional): Language, None for automatic detection Raises: TypeError: If the language is not a AlgorandLanguages enum ValueError: If loaded words list is not valid\"\"\"\n if lang is not None and (not isinstance(lang, AlgorandLanguages)):\n raise TypeError('Language is not an enumerative of AlgorandLanguages')\n super().__init__(lang.value if lang is not None else lang, Bip39WordsListFinder, Bip39WordsListGetter)\n\n def Decode(self, mnemonic: Union[str, Mnemonic]) -> bytes:\n \"\"\"Decode a mnemonic phrase to bytes (no checksum). Args: mnemonic (str or Mnemonic object): Mnemonic Returns: bytes: Decoded bytes Raises: MnemonicChecksumError: If checksum is not valid ValueError: If mnemonic is not valid\"\"\"\n mnemonic_obj = AlgorandMnemonic.FromString(mnemonic) if isinstance(mnemonic, str) else mnemonic\n if mnemonic_obj.WordsCount() not in AlgorandMnemonicConst.MNEMONIC_WORD_NUM:\n raise ValueError(f'Mnemonic words count is not valid ({mnemonic_obj.WordsCount()})')\n words = mnemonic_obj.ToList()\n words_list, _ = self._FindLanguage(mnemonic_obj)\n word_indexes = [words_list.GetWordIdx(w) for w in words]\n entropy_list = AlgorandMnemonicUtils.ConvertBits(word_indexes[:-1], 11, 8)\n assert entropy_list is not None\n entropy_bytes = BytesUtils.FromList(entropy_list)[:-1]\n self.__ValidateChecksum(entropy_bytes, word_indexes[-1], words_list)\n return entropy_bytes\n\n def __ValidateChecksum(entropy_bytes: bytes, chksum_word_idx_exp: int, words_list: MnemonicWordsList) -> None:\n \"\"\"Validate a mnemonic checksum. Args: entropy_bytes (list) : Entropy bytes chksum_word_idx_exp (int) : Expected checksum word index words_list (MnemonicWordsList): Words list Raises: MnemonicChecksumError: If checksum is not valid\"\"\"\n chksum_word_idx = AlgorandMnemonicUtils.ComputeChecksumWordIndex(entropy_bytes)\n if chksum_word_idx != chksum_word_idx_exp:\n raise MnemonicChecksumError(f'Invalid checksum (expected {words_list.GetWordAtIdx(chksum_word_idx)}, got {words_list.GetWordAtIdx(chksum_word_idx_exp)})')\n", "source": "the_stack_v2_python_sparse", "source_path": "bip_utils/algorand/mnemonic/algorand_mnemonic_decoder.py", "source_repo": "ebellocchia/bip_utils", "split": "test", "star_events_count": 244} {"blob_id": "8849800023f71028066ad1b3e2b67715cda03de3", "bodies": ["if type(data).__module__ == numpy.__name__:\n return data\nelse:\n return numpy.asarray(data)", "data = SubselectionAlgorithm._numpify(data)\nkeep = numpy.where(data >= cutoff)[0]\nthrow = numpy.where(data < cutoff)[0]\nreturn (keep.tolist(), throw.tolist())", "sorted_indices = SubselectionAlgorithm._numpify(data).argsort()[::-1]\npoint = numpy.ceil(sorted_indices.shape[0] * cutoff)\nkeep = sorted_indices[:point]\nthrow = sorted_indices[point:]\nreturn (keep.tolist(), throw.tolist())", "data = SubselectionAlgorithm._numpify(data)\ndata_scaled = data / numpy.mean(data)\nkeep = numpy.where(data_scaled >= cutoff)[0]\nthrow = numpy.where(data_scaled < cutoff)[0]\nreturn (keep.tolist(), throw.tolist())"], "bodies_text": "<|body_start_0|>\n if type(data).__module__ == numpy.__name__:\n return data\n else:\n return numpy.asarray(data)\n<|end_body_0|>\n\n<|body_start_1|>\n data = SubselectionAlgorithm._numpify(data)\n keep = numpy.where(data >= cutoff)[0]\n throw = numpy.where(data < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n<|end_body_1|>\n\n<|body_start_2|>\n sorted_indices = SubselectionAlgorithm._numpify(data).argsort()[::-1]\n point = numpy.ceil(sorted_indices.shape[0] * cutoff)\n keep = sorted_indices[:point]\n throw = sorted_indices[point:]\n return (keep.tolist(), throw.tolist())\n<|end_body_2|>\n\n<|body_start_3|>\n data = SubselectionAlgorithm._numpify(data)\n data_scaled = data / numpy.mean(data)\n keep = numpy.where(data_scaled >= cutoff)[0]\n throw = numpy.where(data_scaled < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n<|end_body_3|>\n", "class_docstring": "A class to collect all subselection algorithms", "class_name": "SubselectionAlgorithm", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SubselectionAlgorithm:\n \"\"\"A class to collect all subselection algorithms\"\"\"\n\n def _numpify(data):\n \"\"\"Convert a Python array to a Numpy array\"\"\"\n <|body_0|>\n\n def cutoff(data, cutoff=0.287):\n \"\"\"A cutoff-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its score is l ess than the cutoff. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_1|>\n\n def linear(data, cutoff=0.5):\n \"\"\"A linearly-defined subselection algorithm Description ----------- This algorithm removes the worst 500 decoys. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The porportion of the total number of decoys to keep Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_2|>\n\n def scaled(data, cutoff=0.5):\n \"\"\"A scaling-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its scaled score is less than 0.5. The scaled score is calculated by dividing the satisfaction score by the average of the set. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if type(data).__module__ == numpy.__name__:\n return data\n else:\n return numpy.asarray(data)\n<|end_body_0|>\n\n<|body_start_1|>\n data = SubselectionAlgorithm._numpify(data)\n keep = numpy.where(data >= cutoff)[0]\n throw = numpy.where(data < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n<|end_body_1|>\n\n<|body_start_2|>\n sorted_indices = SubselectionAlgorithm._numpify(data).argsort()[::-1]\n point = numpy.ceil(sorted_indices.shape[0] * cutoff)\n keep = sorted_indices[:point]\n throw = sorted_indices[point:]\n return (keep.tolist(), throw.tolist())\n<|end_body_2|>\n\n<|body_start_3|>\n data = SubselectionAlgorithm._numpify(data)\n data_scaled = data / numpy.mean(data)\n keep = numpy.where(data_scaled >= cutoff)[0]\n throw = numpy.where(data_scaled < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000302", "length_bytes": 25594, "license_type": "permissive", "methods": [{"docstring": "Convert a Python array to a Numpy array", "name": "_numpify", "signature": "def _numpify(data)"}, {"docstring": "A cutoff-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its score is l ess than the cutoff. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw", "name": "cutoff", "signature": "def cutoff(data, cutoff=0.287)"}, {"docstring": "A linearly-defined subselection algorithm Description ----------- This algorithm removes the worst 500 decoys. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The porportion of the total number of decoys to keep Returns ------- list The decoy indices to keep list The decoy indices to throw", "name": "linear", "signature": "def linear(data, cutoff=0.5)"}, {"docstring": "A scaling-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its scaled score is less than 0.5. The scaled score is calculated by dividing the satisfaction score by the average of the set. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw", "name": "scaled", "signature": "def scaled(data, cutoff=0.5)"}], "n_methods": 4, "prompt": "Implement the Python class `SubselectionAlgorithm` described below.\n\nClass description:\nA class to collect all subselection algorithms\n\nMethod signatures and docstrings:\n- def _numpify(data): Convert a Python array to a Numpy array\n- def cutoff(data, cutoff=0.287): A cutoff-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its score is l ess than the cutoff. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\n- def linear(data, cutoff=0.5): A linearly-defined subselection algorithm Description ----------- This algorithm removes the worst 500 decoys. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The porportion of the total number of decoys to keep Returns ------- list The decoy indices to keep list The decoy indices to throw\n- def scaled(data, cutoff=0.5): A scaling-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its scaled score is less than 0.5. The scaled score is calculated by dividing the satisfaction score by the average of the set. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw", "prompted_full_text": "Implement the Python class `SubselectionAlgorithm` described below.\n\nClass description:\nA class to collect all subselection algorithms\n\nMethod signatures and docstrings:\n- def _numpify(data): Convert a Python array to a Numpy array\n- def cutoff(data, cutoff=0.287): A cutoff-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its score is l ess than the cutoff. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\n- def linear(data, cutoff=0.5): A linearly-defined subselection algorithm Description ----------- This algorithm removes the worst 500 decoys. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The porportion of the total number of decoys to keep Returns ------- list The decoy indices to keep list The decoy indices to throw\n- def scaled(data, cutoff=0.5): A scaling-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its scaled score is less than 0.5. The scaled score is calculated by dividing the satisfaction score by the average of the set. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\n\n<|skeleton|>\nclass SubselectionAlgorithm:\n \"\"\"A class to collect all subselection algorithms\"\"\"\n\n def _numpify(data):\n \"\"\"Convert a Python array to a Numpy array\"\"\"\n <|body_0|>\n\n def cutoff(data, cutoff=0.287):\n \"\"\"A cutoff-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its score is l ess than the cutoff. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_1|>\n\n def linear(data, cutoff=0.5):\n \"\"\"A linearly-defined subselection algorithm Description ----------- This algorithm removes the worst 500 decoys. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The porportion of the total number of decoys to keep Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_2|>\n\n def scaled(data, cutoff=0.5):\n \"\"\"A scaling-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its scaled score is less than 0.5. The scaled score is calculated by dividing the satisfaction score by the average of the set. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if type(data).__module__ == numpy.__name__:\n return data\n else:\n return numpy.asarray(data)\n<|end_body_0|>\n\n<|body_start_1|>\n data = SubselectionAlgorithm._numpify(data)\n keep = numpy.where(data >= cutoff)[0]\n throw = numpy.where(data < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n<|end_body_1|>\n\n<|body_start_2|>\n sorted_indices = SubselectionAlgorithm._numpify(data).argsort()[::-1]\n point = numpy.ceil(sorted_indices.shape[0] * cutoff)\n keep = sorted_indices[:point]\n throw = sorted_indices[point:]\n return (keep.tolist(), throw.tolist())\n<|end_body_2|>\n\n<|body_start_3|>\n data = SubselectionAlgorithm._numpify(data)\n data_scaled = data / numpy.mean(data)\n keep = numpy.where(data_scaled >= cutoff)[0]\n throw = numpy.where(data_scaled < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n<|end_body_3|>\n", "revision_id": "9b17f6138424bdd088c09018bffd35acf0b871b6", "skeleton": "<|skeleton|>\nclass SubselectionAlgorithm:\n \"\"\"A class to collect all subselection algorithms\"\"\"\n\n def _numpify(data):\n \"\"\"Convert a Python array to a Numpy array\"\"\"\n <|body_0|>\n\n def cutoff(data, cutoff=0.287):\n \"\"\"A cutoff-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its score is l ess than the cutoff. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_1|>\n\n def linear(data, cutoff=0.5):\n \"\"\"A linearly-defined subselection algorithm Description ----------- This algorithm removes the worst 500 decoys. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The porportion of the total number of decoys to keep Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_2|>\n\n def scaled(data, cutoff=0.5):\n \"\"\"A scaling-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its scaled score is less than 0.5. The scaled score is calculated by dividing the satisfaction score by the average of the set. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SubselectionAlgorithm:\n \"\"\"A class to collect all subselection algorithms\"\"\"\n\n def _numpify(data):\n \"\"\"Convert a Python array to a Numpy array\"\"\"\n if type(data).__module__ == numpy.__name__:\n return data\n else:\n return numpy.asarray(data)\n\n def cutoff(data, cutoff=0.287):\n \"\"\"A cutoff-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its score is l ess than the cutoff. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n data = SubselectionAlgorithm._numpify(data)\n keep = numpy.where(data >= cutoff)[0]\n throw = numpy.where(data < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n\n def linear(data, cutoff=0.5):\n \"\"\"A linearly-defined subselection algorithm Description ----------- This algorithm removes the worst 500 decoys. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The porportion of the total number of decoys to keep Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n sorted_indices = SubselectionAlgorithm._numpify(data).argsort()[::-1]\n point = numpy.ceil(sorted_indices.shape[0] * cutoff)\n keep = sorted_indices[:point]\n throw = sorted_indices[point:]\n return (keep.tolist(), throw.tolist())\n\n def scaled(data, cutoff=0.5):\n \"\"\"A scaling-defined subselection algorithm Description ----------- This algorithm removes a decoy, if its scaled score is less than 0.5. The scaled score is calculated by dividing the satisfaction score by the average of the set. Parameters ---------- data : list, tuple A 1D array of scores cutoff : float, optional The cutoff of keeping decoys Returns ------- list The decoy indices to keep list The decoy indices to throw\"\"\"\n data = SubselectionAlgorithm._numpify(data)\n data_scaled = data / numpy.mean(data)\n keep = numpy.where(data_scaled >= cutoff)[0]\n throw = numpy.where(data_scaled < cutoff)[0]\n return (keep.tolist(), throw.tolist())\n", "source": "the_stack_v2_python_sparse", "source_path": "ample/util/contact_util.py", "source_repo": "hlasimpk/ample", "split": "test", "star_events_count": 0} {"blob_id": "87d97a3114b60e54f012305f86338506acb324f1", "bodies": ["item_list = []\ncontent_list = ContentType.objects.all()\nfor item in content_list:\n item_list.append((item.name, item.name))\nreturn item_list", "if value == True:\n queryset = queryset.filter(parent__isnull=True)\nelse:\n queryset = queryset.filter(parent__isnull=False)\nreturn queryset"], "bodies_text": "<|body_start_0|>\n item_list = []\n content_list = ContentType.objects.all()\n for item in content_list:\n item_list.append((item.name, item.name))\n return item_list\n<|end_body_0|>\n\n<|body_start_1|>\n if value == True:\n queryset = queryset.filter(parent__isnull=True)\n else:\n queryset = queryset.filter(parent__isnull=False)\n return queryset\n<|end_body_1|>\n", "class_docstring": "FilterSet for CommentViewSet.", "class_name": "CommentFilterSet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CommentFilterSet:\n \"\"\"FilterSet for CommentViewSet.\"\"\"\n\n def get_content_type():\n \"\"\"Return list for `content_type` choices.\"\"\"\n <|body_0|>\n\n def get_is_parent(self, queryset, name, value):\n \"\"\"Filters queryset based on `is_parent` field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n item_list = []\n content_list = ContentType.objects.all()\n for item in content_list:\n item_list.append((item.name, item.name))\n return item_list\n<|end_body_0|>\n\n<|body_start_1|>\n if value == True:\n queryset = queryset.filter(parent__isnull=True)\n else:\n queryset = queryset.filter(parent__isnull=False)\n return queryset\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000303", "length_bytes": 2199, "license_type": "no_license", "methods": [{"docstring": "Return list for `content_type` choices.", "name": "get_content_type", "signature": "def get_content_type()"}, {"docstring": "Filters queryset based on `is_parent` field.", "name": "get_is_parent", "signature": "def get_is_parent(self, queryset, name, value)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028080", "prompt": "Implement the Python class `CommentFilterSet` described below.\n\nClass description:\nFilterSet for CommentViewSet.\n\nMethod signatures and docstrings:\n- def get_content_type(): Return list for `content_type` choices.\n- def get_is_parent(self, queryset, name, value): Filters queryset based on `is_parent` field.", "prompted_full_text": "Implement the Python class `CommentFilterSet` described below.\n\nClass description:\nFilterSet for CommentViewSet.\n\nMethod signatures and docstrings:\n- def get_content_type(): Return list for `content_type` choices.\n- def get_is_parent(self, queryset, name, value): Filters queryset based on `is_parent` field.\n\n<|skeleton|>\nclass CommentFilterSet:\n \"\"\"FilterSet for CommentViewSet.\"\"\"\n\n def get_content_type():\n \"\"\"Return list for `content_type` choices.\"\"\"\n <|body_0|>\n\n def get_is_parent(self, queryset, name, value):\n \"\"\"Filters queryset based on `is_parent` field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n item_list = []\n content_list = ContentType.objects.all()\n for item in content_list:\n item_list.append((item.name, item.name))\n return item_list\n<|end_body_0|>\n\n<|body_start_1|>\n if value == True:\n queryset = queryset.filter(parent__isnull=True)\n else:\n queryset = queryset.filter(parent__isnull=False)\n return queryset\n<|end_body_1|>\n", "revision_id": "faee901943371ed85f8ecde456b342efdb07e865", "skeleton": "<|skeleton|>\nclass CommentFilterSet:\n \"\"\"FilterSet for CommentViewSet.\"\"\"\n\n def get_content_type():\n \"\"\"Return list for `content_type` choices.\"\"\"\n <|body_0|>\n\n def get_is_parent(self, queryset, name, value):\n \"\"\"Filters queryset based on `is_parent` field.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CommentFilterSet:\n \"\"\"FilterSet for CommentViewSet.\"\"\"\n\n def get_content_type():\n \"\"\"Return list for `content_type` choices.\"\"\"\n item_list = []\n content_list = ContentType.objects.all()\n for item in content_list:\n item_list.append((item.name, item.name))\n return item_list\n\n def get_is_parent(self, queryset, name, value):\n \"\"\"Filters queryset based on `is_parent` field.\"\"\"\n if value == True:\n queryset = queryset.filter(parent__isnull=True)\n else:\n queryset = queryset.filter(parent__isnull=False)\n return queryset\n", "source": "the_stack_v2_python_sparse", "source_path": "comments/viewsets.py", "source_repo": "arviesan24/social_media", "split": "test", "star_events_count": 0} {"blob_id": "2c593149f5c36b102555156d92807150454a16c8", "bodies": ["logs.log_info('You are using the funny current channel: HCN2')\nself.time_unit = 1000.0\nself.v_corr = 0\nV = V - 10\nself.m = 1.0 / (1 + np.exp((V - -99) / 6.2))\nself.h = 1\nself._mpower = 1\nself._hpower = 0\nself._PmCa = 0.05\nself._PmNa = 0.2", "self.vrev = -45\nV = V - 10\nself._mInf = 1.0 / (1 + np.exp((V - -99) / 6.2))\nself._mTau = 184.0\nself._hInf = 1\nself._hTau = 1"], "bodies_text": "<|body_start_0|>\n logs.log_info('You are using the funny current channel: HCN2')\n self.time_unit = 1000.0\n self.v_corr = 0\n V = V - 10\n self.m = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self.h = 1\n self._mpower = 1\n self._hpower = 0\n self._PmCa = 0.05\n self._PmNa = 0.2\n<|end_body_0|>\n\n<|body_start_1|>\n self.vrev = -45\n V = V - 10\n self._mInf = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self._mTau = 184.0\n self._hInf = 1\n self._hTau = 1\n<|end_body_1|>\n", "class_docstring": "HCN2 model from 21 day old dorsal root ganglion of mouse. HCN channels are voltage-gated ionic channels, regulated by cyclic nucleotides, such as cyclic adenosine-mono-phosphate (cAMP) (not modelled here). In contrast to most Na+ and K+ ionic channels, which open when membrane potential is depolarized, they are opened when the membrane potential hyperpolarizes below -50 mV. This channel is expressed in brain and heart tissue, though the specific function is unknown. Reference: Moosmang S. et al. Cellular expression and functional characterization of four hyperpolarization-activated pacemaker channels in cardiac and neuronal tissues. Eur. J. Biochem., 2001 Mar , 268 (1646-52).", "class_name": "HCN2", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HCN2:\n \"\"\"HCN2 model from 21 day old dorsal root ganglion of mouse. HCN channels are voltage-gated ionic channels, regulated by cyclic nucleotides, such as cyclic adenosine-mono-phosphate (cAMP) (not modelled here). In contrast to most Na+ and K+ ionic channels, which open when membrane potential is depolarized, they are opened when the membrane potential hyperpolarizes below -50 mV. This channel is expressed in brain and heart tissue, though the specific function is unknown. Reference: Moosmang S. et al. Cellular expression and functional characterization of four hyperpolarization-activated pacemaker channels in cardiac and neuronal tissues. Eur. J. Biochem., 2001 Mar , 268 (1646-52).\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n <|body_0|>\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logs.log_info('You are using the funny current channel: HCN2')\n self.time_unit = 1000.0\n self.v_corr = 0\n V = V - 10\n self.m = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self.h = 1\n self._mpower = 1\n self._hpower = 0\n self._PmCa = 0.05\n self._PmNa = 0.2\n<|end_body_0|>\n\n<|body_start_1|>\n self.vrev = -45\n V = V - 10\n self._mInf = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self._mTau = 184.0\n self._hInf = 1\n self._hTau = 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000304", "length_bytes": 14413, "license_type": "no_license", "methods": [{"docstring": "Run initialization calculation for m and h gates of the channel at starting Vmem value.", "name": "_init_state", "signature": "def _init_state(self, V)"}, {"docstring": "Update the state of m and h gates of the channel given their present value and present simulation Vmem.", "name": "_calculate_state", "signature": "def _calculate_state(self, V)"}], "n_methods": 2, "prompt": "Implement the Python class `HCN2` described below.\n\nClass description:\nHCN2 model from 21 day old dorsal root ganglion of mouse. HCN channels are voltage-gated ionic channels, regulated by cyclic nucleotides, such as cyclic adenosine-mono-phosphate (cAMP) (not modelled here). In contrast to most Na+ and K+ ionic channels, which open when membrane potential is depolarized, they are opened when the membrane potential hyperpolarizes below -50 mV. This channel is expressed in brain and heart tissue, though the specific function is unknown. Reference: Moosmang S. et al. Cellular expression and functional characterization of four hyperpolarization-activated pacemaker channels in cardiac and neuronal tissues. Eur. J. Biochem., 2001 Mar , 268 (1646-52).\n\nMethod signatures and docstrings:\n- def _init_state(self, V): Run initialization calculation for m and h gates of the channel at starting Vmem value.\n- def _calculate_state(self, V): Update the state of m and h gates of the channel given their present value and present simulation Vmem.", "prompted_full_text": "Implement the Python class `HCN2` described below.\n\nClass description:\nHCN2 model from 21 day old dorsal root ganglion of mouse. HCN channels are voltage-gated ionic channels, regulated by cyclic nucleotides, such as cyclic adenosine-mono-phosphate (cAMP) (not modelled here). In contrast to most Na+ and K+ ionic channels, which open when membrane potential is depolarized, they are opened when the membrane potential hyperpolarizes below -50 mV. This channel is expressed in brain and heart tissue, though the specific function is unknown. Reference: Moosmang S. et al. Cellular expression and functional characterization of four hyperpolarization-activated pacemaker channels in cardiac and neuronal tissues. Eur. J. Biochem., 2001 Mar , 268 (1646-52).\n\nMethod signatures and docstrings:\n- def _init_state(self, V): Run initialization calculation for m and h gates of the channel at starting Vmem value.\n- def _calculate_state(self, V): Update the state of m and h gates of the channel given their present value and present simulation Vmem.\n\n<|skeleton|>\nclass HCN2:\n \"\"\"HCN2 model from 21 day old dorsal root ganglion of mouse. HCN channels are voltage-gated ionic channels, regulated by cyclic nucleotides, such as cyclic adenosine-mono-phosphate (cAMP) (not modelled here). In contrast to most Na+ and K+ ionic channels, which open when membrane potential is depolarized, they are opened when the membrane potential hyperpolarizes below -50 mV. This channel is expressed in brain and heart tissue, though the specific function is unknown. Reference: Moosmang S. et al. Cellular expression and functional characterization of four hyperpolarization-activated pacemaker channels in cardiac and neuronal tissues. Eur. J. Biochem., 2001 Mar , 268 (1646-52).\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n <|body_0|>\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logs.log_info('You are using the funny current channel: HCN2')\n self.time_unit = 1000.0\n self.v_corr = 0\n V = V - 10\n self.m = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self.h = 1\n self._mpower = 1\n self._hpower = 0\n self._PmCa = 0.05\n self._PmNa = 0.2\n<|end_body_0|>\n\n<|body_start_1|>\n self.vrev = -45\n V = V - 10\n self._mInf = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self._mTau = 184.0\n self._hInf = 1\n self._hTau = 1\n<|end_body_1|>\n", "revision_id": "dd03ff5e3df3ef48d887a6566a6286fcd168880b", "skeleton": "<|skeleton|>\nclass HCN2:\n \"\"\"HCN2 model from 21 day old dorsal root ganglion of mouse. HCN channels are voltage-gated ionic channels, regulated by cyclic nucleotides, such as cyclic adenosine-mono-phosphate (cAMP) (not modelled here). In contrast to most Na+ and K+ ionic channels, which open when membrane potential is depolarized, they are opened when the membrane potential hyperpolarizes below -50 mV. This channel is expressed in brain and heart tissue, though the specific function is unknown. Reference: Moosmang S. et al. Cellular expression and functional characterization of four hyperpolarization-activated pacemaker channels in cardiac and neuronal tissues. Eur. J. Biochem., 2001 Mar , 268 (1646-52).\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n <|body_0|>\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HCN2:\n \"\"\"HCN2 model from 21 day old dorsal root ganglion of mouse. HCN channels are voltage-gated ionic channels, regulated by cyclic nucleotides, such as cyclic adenosine-mono-phosphate (cAMP) (not modelled here). In contrast to most Na+ and K+ ionic channels, which open when membrane potential is depolarized, they are opened when the membrane potential hyperpolarizes below -50 mV. This channel is expressed in brain and heart tissue, though the specific function is unknown. Reference: Moosmang S. et al. Cellular expression and functional characterization of four hyperpolarization-activated pacemaker channels in cardiac and neuronal tissues. Eur. J. Biochem., 2001 Mar , 268 (1646-52).\"\"\"\n\n def _init_state(self, V):\n \"\"\"Run initialization calculation for m and h gates of the channel at starting Vmem value.\"\"\"\n logs.log_info('You are using the funny current channel: HCN2')\n self.time_unit = 1000.0\n self.v_corr = 0\n V = V - 10\n self.m = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self.h = 1\n self._mpower = 1\n self._hpower = 0\n self._PmCa = 0.05\n self._PmNa = 0.2\n\n def _calculate_state(self, V):\n \"\"\"Update the state of m and h gates of the channel given their present value and present simulation Vmem.\"\"\"\n self.vrev = -45\n V = V - 10\n self._mInf = 1.0 / (1 + np.exp((V - -99) / 6.2))\n self._mTau = 184.0\n self._hInf = 1\n self._hTau = 1\n", "source": "the_stack_v2_python_sparse", "source_path": "betse/science/channels/vg_funny.py", "source_repo": "R-Stefano/betse-ml", "split": "test", "star_events_count": 0} {"blob_id": "553aa3e8e2e8bf366ec80caf2dfa415dc5e4c5e0", "bodies": ["super(VGGs, self).__init__()\nself.class_num = class_num\nself.vgg_name = vgg_name\nself.attention = attention\nself.in_channels = 1\nself.features = self._make_layers()\nself.classifier = self._classifer_layers()", "layers = []\nfor layer_type in self.cfg[self.vgg_name]:\n if layer_type != 'M':\n layer = [nn.Conv2d(self.in_channels, out_channels=self.in_channels, kernel_size=3, stride=1, padding=1), nn.Conv2d(self.in_channels, out_channels=layer_type, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(num_features=layer_type), nn.ReLU(inplace=True)]\n self.in_channels = layer_type\n else:\n layer = [nn.MaxPool2d(kernel_size=self.pool_kernel_size, stride=self.pool_stride)]\n layers += layer\nif self.attention:\n layers.append(Attention(self.in_channels, self.in_channels))\nlayers.append(nn.AdaptiveAvgPool2d(output_size=(1, 1)))\nvgg_feature_net = nn.Sequential(*layers)\nreturn vgg_feature_net", "layers = []\nlayers.append(nn.Dropout(p=0.5))\nlayers.append(nn.Linear(in_features=self.in_channels, out_features=self.class_num, bias=True))\nvgg_classifer_net = nn.Sequential(*layers)\nreturn vgg_classifer_net", "feature = self.features(x)\nflat_feature = feature.view(feature.size(0), -1)\nout = self.classifier(flat_feature)\nreturn out"], "bodies_text": "<|body_start_0|>\n super(VGGs, self).__init__()\n self.class_num = class_num\n self.vgg_name = vgg_name\n self.attention = attention\n self.in_channels = 1\n self.features = self._make_layers()\n self.classifier = self._classifer_layers()\n<|end_body_0|>\n\n<|body_start_1|>\n layers = []\n for layer_type in self.cfg[self.vgg_name]:\n if layer_type != 'M':\n layer = [nn.Conv2d(self.in_channels, out_channels=self.in_channels, kernel_size=3, stride=1, padding=1), nn.Conv2d(self.in_channels, out_channels=layer_type, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(num_features=layer_type), nn.ReLU(inplace=True)]\n self.in_channels = layer_type\n else:\n layer = [nn.MaxPool2d(kernel_size=self.pool_kernel_size, stride=self.pool_stride)]\n layers += layer\n if self.attention:\n layers.append(Attention(self.in_channels, self.in_channels))\n layers.append(nn.AdaptiveAvgPool2d(output_size=(1, 1)))\n vgg_feature_net = nn.Sequential(*layers)\n return vgg_feature_net\n<|end_body_1|>\n\n<|body_start_2|>\n layers = []\n layers.append(nn.Dropout(p=0.5))\n layers.append(nn.Linear(in_features=self.in_channels, out_features=self.class_num, bias=True))\n vgg_classifer_net = nn.Sequential(*layers)\n return vgg_classifer_net\n<|end_body_2|>\n\n<|body_start_3|>\n feature = self.features(x)\n flat_feature = feature.view(feature.size(0), -1)\n out = self.classifier(flat_feature)\n return out\n<|end_body_3|>\n", "class_docstring": "VGGs神经网络结构搭建, 前几层fc换为全局池化, 尝试深度可分离卷积", "class_name": "VGGs", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VGGs:\n \"\"\"VGGs神经网络结构搭建, 前几层fc换为全局池化, 尝试深度可分离卷积\"\"\"\n\n def __init__(self, vgg_name, class_num, attention):\n \"\"\"网络初始化 param: vgg_name 选择的VGG网络名 param: class_num 目标分类数 param: attention 注意力\"\"\"\n <|body_0|>\n\n def _make_layers(self):\n \"\"\"建立特征提取部分网络 return vgg_feature_net VGG特征提取部分\"\"\"\n <|body_1|>\n\n def _classifer_layers(self):\n \"\"\"建立分类部分网络 return vgg_classifer_net VGG分类部分\"\"\"\n <|body_2|>\n\n def forward(self, x):\n \"\"\"前向传播 :param: x 图片变量 return out: 前向传播输出\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(VGGs, self).__init__()\n self.class_num = class_num\n self.vgg_name = vgg_name\n self.attention = attention\n self.in_channels = 1\n self.features = self._make_layers()\n self.classifier = self._classifer_layers()\n<|end_body_0|>\n\n<|body_start_1|>\n layers = []\n for layer_type in self.cfg[self.vgg_name]:\n if layer_type != 'M':\n layer = [nn.Conv2d(self.in_channels, out_channels=self.in_channels, kernel_size=3, stride=1, padding=1), nn.Conv2d(self.in_channels, out_channels=layer_type, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(num_features=layer_type), nn.ReLU(inplace=True)]\n self.in_channels = layer_type\n else:\n layer = [nn.MaxPool2d(kernel_size=self.pool_kernel_size, stride=self.pool_stride)]\n layers += layer\n if self.attention:\n layers.append(Attention(self.in_channels, self.in_channels))\n layers.append(nn.AdaptiveAvgPool2d(output_size=(1, 1)))\n vgg_feature_net = nn.Sequential(*layers)\n return vgg_feature_net\n<|end_body_1|>\n\n<|body_start_2|>\n layers = []\n layers.append(nn.Dropout(p=0.5))\n layers.append(nn.Linear(in_features=self.in_channels, out_features=self.class_num, bias=True))\n vgg_classifer_net = nn.Sequential(*layers)\n return vgg_classifer_net\n<|end_body_2|>\n\n<|body_start_3|>\n feature = self.features(x)\n flat_feature = feature.view(feature.size(0), -1)\n out = self.classifier(flat_feature)\n return out\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000305", "length_bytes": 36979, "license_type": "no_license", "methods": [{"docstring": "网络初始化 param: vgg_name 选择的VGG网络名 param: class_num 目标分类数 param: attention 注意力", "name": "__init__", "signature": "def __init__(self, vgg_name, class_num, attention)"}, {"docstring": "建立特征提取部分网络 return vgg_feature_net VGG特征提取部分", "name": "_make_layers", "signature": "def _make_layers(self)"}, {"docstring": "建立分类部分网络 return vgg_classifer_net VGG分类部分", "name": "_classifer_layers", "signature": "def _classifer_layers(self)"}, {"docstring": "前向传播 :param: x 图片变量 return out: 前向传播输出", "name": "forward", "signature": "def forward(self, x)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_036500", "prompt": "Implement the Python class `VGGs` described below.\n\nClass description:\nVGGs神经网络结构搭建, 前几层fc换为全局池化, 尝试深度可分离卷积\n\nMethod signatures and docstrings:\n- def __init__(self, vgg_name, class_num, attention): 网络初始化 param: vgg_name 选择的VGG网络名 param: class_num 目标分类数 param: attention 注意力\n- def _make_layers(self): 建立特征提取部分网络 return vgg_feature_net VGG特征提取部分\n- def _classifer_layers(self): 建立分类部分网络 return vgg_classifer_net VGG分类部分\n- def forward(self, x): 前向传播 :param: x 图片变量 return out: 前向传播输出", "prompted_full_text": "Implement the Python class `VGGs` described below.\n\nClass description:\nVGGs神经网络结构搭建, 前几层fc换为全局池化, 尝试深度可分离卷积\n\nMethod signatures and docstrings:\n- def __init__(self, vgg_name, class_num, attention): 网络初始化 param: vgg_name 选择的VGG网络名 param: class_num 目标分类数 param: attention 注意力\n- def _make_layers(self): 建立特征提取部分网络 return vgg_feature_net VGG特征提取部分\n- def _classifer_layers(self): 建立分类部分网络 return vgg_classifer_net VGG分类部分\n- def forward(self, x): 前向传播 :param: x 图片变量 return out: 前向传播输出\n\n<|skeleton|>\nclass VGGs:\n \"\"\"VGGs神经网络结构搭建, 前几层fc换为全局池化, 尝试深度可分离卷积\"\"\"\n\n def __init__(self, vgg_name, class_num, attention):\n \"\"\"网络初始化 param: vgg_name 选择的VGG网络名 param: class_num 目标分类数 param: attention 注意力\"\"\"\n <|body_0|>\n\n def _make_layers(self):\n \"\"\"建立特征提取部分网络 return vgg_feature_net VGG特征提取部分\"\"\"\n <|body_1|>\n\n def _classifer_layers(self):\n \"\"\"建立分类部分网络 return vgg_classifer_net VGG分类部分\"\"\"\n <|body_2|>\n\n def forward(self, x):\n \"\"\"前向传播 :param: x 图片变量 return out: 前向传播输出\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(VGGs, self).__init__()\n self.class_num = class_num\n self.vgg_name = vgg_name\n self.attention = attention\n self.in_channels = 1\n self.features = self._make_layers()\n self.classifier = self._classifer_layers()\n<|end_body_0|>\n\n<|body_start_1|>\n layers = []\n for layer_type in self.cfg[self.vgg_name]:\n if layer_type != 'M':\n layer = [nn.Conv2d(self.in_channels, out_channels=self.in_channels, kernel_size=3, stride=1, padding=1), nn.Conv2d(self.in_channels, out_channels=layer_type, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(num_features=layer_type), nn.ReLU(inplace=True)]\n self.in_channels = layer_type\n else:\n layer = [nn.MaxPool2d(kernel_size=self.pool_kernel_size, stride=self.pool_stride)]\n layers += layer\n if self.attention:\n layers.append(Attention(self.in_channels, self.in_channels))\n layers.append(nn.AdaptiveAvgPool2d(output_size=(1, 1)))\n vgg_feature_net = nn.Sequential(*layers)\n return vgg_feature_net\n<|end_body_1|>\n\n<|body_start_2|>\n layers = []\n layers.append(nn.Dropout(p=0.5))\n layers.append(nn.Linear(in_features=self.in_channels, out_features=self.class_num, bias=True))\n vgg_classifer_net = nn.Sequential(*layers)\n return vgg_classifer_net\n<|end_body_2|>\n\n<|body_start_3|>\n feature = self.features(x)\n flat_feature = feature.view(feature.size(0), -1)\n out = self.classifier(flat_feature)\n return out\n<|end_body_3|>\n", "revision_id": "2a68fd854bc5b1806319dfc40e36e084f9c4c5d0", "skeleton": "<|skeleton|>\nclass VGGs:\n \"\"\"VGGs神经网络结构搭建, 前几层fc换为全局池化, 尝试深度可分离卷积\"\"\"\n\n def __init__(self, vgg_name, class_num, attention):\n \"\"\"网络初始化 param: vgg_name 选择的VGG网络名 param: class_num 目标分类数 param: attention 注意力\"\"\"\n <|body_0|>\n\n def _make_layers(self):\n \"\"\"建立特征提取部分网络 return vgg_feature_net VGG特征提取部分\"\"\"\n <|body_1|>\n\n def _classifer_layers(self):\n \"\"\"建立分类部分网络 return vgg_classifer_net VGG分类部分\"\"\"\n <|body_2|>\n\n def forward(self, x):\n \"\"\"前向传播 :param: x 图片变量 return out: 前向传播输出\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VGGs:\n \"\"\"VGGs神经网络结构搭建, 前几层fc换为全局池化, 尝试深度可分离卷积\"\"\"\n\n def __init__(self, vgg_name, class_num, attention):\n \"\"\"网络初始化 param: vgg_name 选择的VGG网络名 param: class_num 目标分类数 param: attention 注意力\"\"\"\n super(VGGs, self).__init__()\n self.class_num = class_num\n self.vgg_name = vgg_name\n self.attention = attention\n self.in_channels = 1\n self.features = self._make_layers()\n self.classifier = self._classifer_layers()\n\n def _make_layers(self):\n \"\"\"建立特征提取部分网络 return vgg_feature_net VGG特征提取部分\"\"\"\n layers = []\n for layer_type in self.cfg[self.vgg_name]:\n if layer_type != 'M':\n layer = [nn.Conv2d(self.in_channels, out_channels=self.in_channels, kernel_size=3, stride=1, padding=1), nn.Conv2d(self.in_channels, out_channels=layer_type, kernel_size=1, stride=1, padding=0), nn.BatchNorm2d(num_features=layer_type), nn.ReLU(inplace=True)]\n self.in_channels = layer_type\n else:\n layer = [nn.MaxPool2d(kernel_size=self.pool_kernel_size, stride=self.pool_stride)]\n layers += layer\n if self.attention:\n layers.append(Attention(self.in_channels, self.in_channels))\n layers.append(nn.AdaptiveAvgPool2d(output_size=(1, 1)))\n vgg_feature_net = nn.Sequential(*layers)\n return vgg_feature_net\n\n def _classifer_layers(self):\n \"\"\"建立分类部分网络 return vgg_classifer_net VGG分类部分\"\"\"\n layers = []\n layers.append(nn.Dropout(p=0.5))\n layers.append(nn.Linear(in_features=self.in_channels, out_features=self.class_num, bias=True))\n vgg_classifer_net = nn.Sequential(*layers)\n return vgg_classifer_net\n\n def forward(self, x):\n \"\"\"前向传播 :param: x 图片变量 return out: 前向传播输出\"\"\"\n feature = self.features(x)\n flat_feature = feature.view(feature.size(0), -1)\n out = self.classifier(flat_feature)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "code_keh/2d/Pytorch_nets_channel1.py", "source_repo": "ruichen9/3DCTLungDiseaseDiagnosis", "split": "test", "star_events_count": 0} {"blob_id": "3812932b9c7d8d2ae46ac406c102129afcabe53d", "bodies": ["self._crossover_prob = crossover_prob\nself._uniform_prob = uniform_prob\nreturn", "new_org_1 = org_1.copy()\nnew_org_2 = org_2.copy()\ncrossover_chance = random.random()\nif crossover_chance <= self._crossover_prob:\n minlen = min(len(new_org_1.genome), len(new_org_2.genome))\n for i in range(minlen):\n uniform_chance = random.random()\n if uniform_chance <= self._uniform_prob:\n temp = new_org_1.genome[i]\n new_org_1.genome[i] = new_org_2.genome[i]\n new_org_2.genome[i] = temp\nreturn (new_org_1, new_org_2)"], "bodies_text": "<|body_start_0|>\n self._crossover_prob = crossover_prob\n self._uniform_prob = uniform_prob\n return\n<|end_body_0|>\n\n<|body_start_1|>\n new_org_1 = org_1.copy()\n new_org_2 = org_2.copy()\n crossover_chance = random.random()\n if crossover_chance <= self._crossover_prob:\n minlen = min(len(new_org_1.genome), len(new_org_2.genome))\n for i in range(minlen):\n uniform_chance = random.random()\n if uniform_chance <= self._uniform_prob:\n temp = new_org_1.genome[i]\n new_org_1.genome[i] = new_org_2.genome[i]\n new_org_2.genome[i] = temp\n return (new_org_1, new_org_2)\n<|end_body_1|>\n", "class_docstring": "Perform single point crossover between genomes at some defined rates. This performs a single crossover between two genomes at some defined frequency. The location of the crossover is chosen randomly if the crossover meets the probability to occur.", "class_name": "UniformCrossover", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UniformCrossover:\n \"\"\"Perform single point crossover between genomes at some defined rates. This performs a single crossover between two genomes at some defined frequency. The location of the crossover is chosen randomly if the crossover meets the probability to occur.\"\"\"\n\n def __init__(self, crossover_prob=0.1, uniform_prob=0.7):\n \"\"\"Initialize to do uniform crossover at the specified probability and frequency.\"\"\"\n <|body_0|>\n\n def do_crossover(self, org_1, org_2):\n \"\"\"Potentially do a crossover between the two organisms.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._crossover_prob = crossover_prob\n self._uniform_prob = uniform_prob\n return\n<|end_body_0|>\n\n<|body_start_1|>\n new_org_1 = org_1.copy()\n new_org_2 = org_2.copy()\n crossover_chance = random.random()\n if crossover_chance <= self._crossover_prob:\n minlen = min(len(new_org_1.genome), len(new_org_2.genome))\n for i in range(minlen):\n uniform_chance = random.random()\n if uniform_chance <= self._uniform_prob:\n temp = new_org_1.genome[i]\n new_org_1.genome[i] = new_org_2.genome[i]\n new_org_2.genome[i] = temp\n return (new_org_1, new_org_2)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000306", "length_bytes": 1744, "license_type": "permissive", "methods": [{"docstring": "Initialize to do uniform crossover at the specified probability and frequency.", "name": "__init__", "signature": "def __init__(self, crossover_prob=0.1, uniform_prob=0.7)"}, {"docstring": "Potentially do a crossover between the two organisms.", "name": "do_crossover", "signature": "def do_crossover(self, org_1, org_2)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031195", "prompt": "Implement the Python class `UniformCrossover` described below.\n\nClass description:\nPerform single point crossover between genomes at some defined rates. This performs a single crossover between two genomes at some defined frequency. The location of the crossover is chosen randomly if the crossover meets the probability to occur.\n\nMethod signatures and docstrings:\n- def __init__(self, crossover_prob=0.1, uniform_prob=0.7): Initialize to do uniform crossover at the specified probability and frequency.\n- def do_crossover(self, org_1, org_2): Potentially do a crossover between the two organisms.", "prompted_full_text": "Implement the Python class `UniformCrossover` described below.\n\nClass description:\nPerform single point crossover between genomes at some defined rates. This performs a single crossover between two genomes at some defined frequency. The location of the crossover is chosen randomly if the crossover meets the probability to occur.\n\nMethod signatures and docstrings:\n- def __init__(self, crossover_prob=0.1, uniform_prob=0.7): Initialize to do uniform crossover at the specified probability and frequency.\n- def do_crossover(self, org_1, org_2): Potentially do a crossover between the two organisms.\n\n<|skeleton|>\nclass UniformCrossover:\n \"\"\"Perform single point crossover between genomes at some defined rates. This performs a single crossover between two genomes at some defined frequency. The location of the crossover is chosen randomly if the crossover meets the probability to occur.\"\"\"\n\n def __init__(self, crossover_prob=0.1, uniform_prob=0.7):\n \"\"\"Initialize to do uniform crossover at the specified probability and frequency.\"\"\"\n <|body_0|>\n\n def do_crossover(self, org_1, org_2):\n \"\"\"Potentially do a crossover between the two organisms.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._crossover_prob = crossover_prob\n self._uniform_prob = uniform_prob\n return\n<|end_body_0|>\n\n<|body_start_1|>\n new_org_1 = org_1.copy()\n new_org_2 = org_2.copy()\n crossover_chance = random.random()\n if crossover_chance <= self._crossover_prob:\n minlen = min(len(new_org_1.genome), len(new_org_2.genome))\n for i in range(minlen):\n uniform_chance = random.random()\n if uniform_chance <= self._uniform_prob:\n temp = new_org_1.genome[i]\n new_org_1.genome[i] = new_org_2.genome[i]\n new_org_2.genome[i] = temp\n return (new_org_1, new_org_2)\n<|end_body_1|>\n", "revision_id": "1d9a8e84a8572809ee3260ede44290e14de3bdd1", "skeleton": "<|skeleton|>\nclass UniformCrossover:\n \"\"\"Perform single point crossover between genomes at some defined rates. This performs a single crossover between two genomes at some defined frequency. The location of the crossover is chosen randomly if the crossover meets the probability to occur.\"\"\"\n\n def __init__(self, crossover_prob=0.1, uniform_prob=0.7):\n \"\"\"Initialize to do uniform crossover at the specified probability and frequency.\"\"\"\n <|body_0|>\n\n def do_crossover(self, org_1, org_2):\n \"\"\"Potentially do a crossover between the two organisms.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UniformCrossover:\n \"\"\"Perform single point crossover between genomes at some defined rates. This performs a single crossover between two genomes at some defined frequency. The location of the crossover is chosen randomly if the crossover meets the probability to occur.\"\"\"\n\n def __init__(self, crossover_prob=0.1, uniform_prob=0.7):\n \"\"\"Initialize to do uniform crossover at the specified probability and frequency.\"\"\"\n self._crossover_prob = crossover_prob\n self._uniform_prob = uniform_prob\n return\n\n def do_crossover(self, org_1, org_2):\n \"\"\"Potentially do a crossover between the two organisms.\"\"\"\n new_org_1 = org_1.copy()\n new_org_2 = org_2.copy()\n crossover_chance = random.random()\n if crossover_chance <= self._crossover_prob:\n minlen = min(len(new_org_1.genome), len(new_org_2.genome))\n for i in range(minlen):\n uniform_chance = random.random()\n if uniform_chance <= self._uniform_prob:\n temp = new_org_1.genome[i]\n new_org_1.genome[i] = new_org_2.genome[i]\n new_org_2.genome[i] = temp\n return (new_org_1, new_org_2)\n", "source": "the_stack_v2_python_sparse", "source_path": "bin/last_wrapper/Bio/GA/Crossover/Uniform.py", "source_repo": "LyonsLab/coge", "split": "test", "star_events_count": 41} {"blob_id": "d7c4c557f992fae72ee0c2d7f1fc2faf4bf7a3a1", "bodies": ["super().__init__()\nself.encoder = ESPNetX3_Encoder(classes, p, q)\nif encoderFile != None:\n self.encoder.load_state_dict(torch.load(encoderFile))\n print('Encoder loaded!')\nself.en_modules = []\nfor i, m in enumerate(self.encoder.children()):\n self.en_modules.append(m)\nself.level3_C = C(128 + 3, classes, 1, 1)\nself.br = nn.BatchNorm2d(classes, eps=0.001)\nself.conv = CBR(19 + classes, classes, 3, 1)\nself.up_l3 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False))\nself.combine_l2_l3 = nn.Sequential(BR(2 * classes), DilatedParllelResidualBlockB(2 * classes, classes, add=False))\nself.up_l2 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False), BR(classes))\nself.classifier = nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False)", "output0 = self.en_modules[0](input)\ninp1 = self.en_modules[1](input)\ninp2 = self.en_modules[2](input)\noutput0_cat = self.en_modules[3](torch.cat([output0, inp1], 1))\noutput1_0 = self.en_modules[4](output0_cat)\nfor i, layer in enumerate(self.en_modules[5]):\n if i == 0:\n output1 = layer(output1_0)\n else:\n output1 = layer(output1)\noutput1_cat = self.en_modules[6](torch.cat([output1, output1_0, inp2], 1))\noutput2_0 = self.en_modules[7](output1_cat)\nfor i, layer in enumerate(self.en_modules[8]):\n if i == 0:\n output2 = layer(output2_0)\n else:\n output2 = layer(output2)\noutput2_cat = self.en_modules[9](torch.cat([output2_0, output2], 1))\noutput2_c = self.up_l3(self.br(self.en_modules[10](output2_cat)))\noutput1_C = self.level3_C(output1_cat)\ncomb_l2_l3 = self.up_l2(self.combine_l2_l3(torch.cat([output1_C, output2_c], 1)))\nconcat_features = self.conv(torch.cat([comb_l2_l3, output0_cat], 1))\nclassifier = self.classifier(concat_features)\nreturn classifier"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.encoder = ESPNetX3_Encoder(classes, p, q)\n if encoderFile != None:\n self.encoder.load_state_dict(torch.load(encoderFile))\n print('Encoder loaded!')\n self.en_modules = []\n for i, m in enumerate(self.encoder.children()):\n self.en_modules.append(m)\n self.level3_C = C(128 + 3, classes, 1, 1)\n self.br = nn.BatchNorm2d(classes, eps=0.001)\n self.conv = CBR(19 + classes, classes, 3, 1)\n self.up_l3 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False))\n self.combine_l2_l3 = nn.Sequential(BR(2 * classes), DilatedParllelResidualBlockB(2 * classes, classes, add=False))\n self.up_l2 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False), BR(classes))\n self.classifier = nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False)\n<|end_body_0|>\n\n<|body_start_1|>\n output0 = self.en_modules[0](input)\n inp1 = self.en_modules[1](input)\n inp2 = self.en_modules[2](input)\n output0_cat = self.en_modules[3](torch.cat([output0, inp1], 1))\n output1_0 = self.en_modules[4](output0_cat)\n for i, layer in enumerate(self.en_modules[5]):\n if i == 0:\n output1 = layer(output1_0)\n else:\n output1 = layer(output1)\n output1_cat = self.en_modules[6](torch.cat([output1, output1_0, inp2], 1))\n output2_0 = self.en_modules[7](output1_cat)\n for i, layer in enumerate(self.en_modules[8]):\n if i == 0:\n output2 = layer(output2_0)\n else:\n output2 = layer(output2)\n output2_cat = self.en_modules[9](torch.cat([output2_0, output2], 1))\n output2_c = self.up_l3(self.br(self.en_modules[10](output2_cat)))\n output1_C = self.level3_C(output1_cat)\n comb_l2_l3 = self.up_l2(self.combine_l2_l3(torch.cat([output1_C, output2_c], 1)))\n concat_features = self.conv(torch.cat([comb_l2_l3, output0_cat], 1))\n classifier = self.classifier(concat_features)\n return classifier\n<|end_body_1|>\n", "class_docstring": "This class defines the ESPNetX3 network", "class_name": "ESPNetX3", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ESPNetX3:\n \"\"\"This class defines the ESPNetX3 network\"\"\"\n\n def __init__(self, classes=19, p=2, q=3, encoderFile=None):\n \"\"\":param classes: number of classes in the dataset. Default is 20 for the cityscapes :param p: depth multiplier :param q: depth multiplier :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNetX3-C and then attached the RUM-based light weight decoder. See paper for more details.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\":param input: RGB image :return: transformed feature map\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.encoder = ESPNetX3_Encoder(classes, p, q)\n if encoderFile != None:\n self.encoder.load_state_dict(torch.load(encoderFile))\n print('Encoder loaded!')\n self.en_modules = []\n for i, m in enumerate(self.encoder.children()):\n self.en_modules.append(m)\n self.level3_C = C(128 + 3, classes, 1, 1)\n self.br = nn.BatchNorm2d(classes, eps=0.001)\n self.conv = CBR(19 + classes, classes, 3, 1)\n self.up_l3 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False))\n self.combine_l2_l3 = nn.Sequential(BR(2 * classes), DilatedParllelResidualBlockB(2 * classes, classes, add=False))\n self.up_l2 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False), BR(classes))\n self.classifier = nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False)\n<|end_body_0|>\n\n<|body_start_1|>\n output0 = self.en_modules[0](input)\n inp1 = self.en_modules[1](input)\n inp2 = self.en_modules[2](input)\n output0_cat = self.en_modules[3](torch.cat([output0, inp1], 1))\n output1_0 = self.en_modules[4](output0_cat)\n for i, layer in enumerate(self.en_modules[5]):\n if i == 0:\n output1 = layer(output1_0)\n else:\n output1 = layer(output1)\n output1_cat = self.en_modules[6](torch.cat([output1, output1_0, inp2], 1))\n output2_0 = self.en_modules[7](output1_cat)\n for i, layer in enumerate(self.en_modules[8]):\n if i == 0:\n output2 = layer(output2_0)\n else:\n output2 = layer(output2)\n output2_cat = self.en_modules[9](torch.cat([output2_0, output2], 1))\n output2_c = self.up_l3(self.br(self.en_modules[10](output2_cat)))\n output1_C = self.level3_C(output1_cat)\n comb_l2_l3 = self.up_l2(self.combine_l2_l3(torch.cat([output1_C, output2_c], 1)))\n concat_features = self.conv(torch.cat([comb_l2_l3, output0_cat], 1))\n classifier = self.classifier(concat_features)\n return classifier\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000307", "length_bytes": 45316, "license_type": "permissive", "methods": [{"docstring": ":param classes: number of classes in the dataset. Default is 20 for the cityscapes :param p: depth multiplier :param q: depth multiplier :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNetX3-C and then attached the RUM-based light weight decoder. See paper for more details.", "name": "__init__", "signature": "def __init__(self, classes=19, p=2, q=3, encoderFile=None)"}, {"docstring": ":param input: RGB image :return: transformed feature map", "name": "forward", "signature": "def forward(self, input)"}], "n_methods": 2, "prompt": "Implement the Python class `ESPNetX3` described below.\n\nClass description:\nThis class defines the ESPNetX3 network\n\nMethod signatures and docstrings:\n- def __init__(self, classes=19, p=2, q=3, encoderFile=None): :param classes: number of classes in the dataset. Default is 20 for the cityscapes :param p: depth multiplier :param q: depth multiplier :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNetX3-C and then attached the RUM-based light weight decoder. See paper for more details.\n- def forward(self, input): :param input: RGB image :return: transformed feature map", "prompted_full_text": "Implement the Python class `ESPNetX3` described below.\n\nClass description:\nThis class defines the ESPNetX3 network\n\nMethod signatures and docstrings:\n- def __init__(self, classes=19, p=2, q=3, encoderFile=None): :param classes: number of classes in the dataset. Default is 20 for the cityscapes :param p: depth multiplier :param q: depth multiplier :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNetX3-C and then attached the RUM-based light weight decoder. See paper for more details.\n- def forward(self, input): :param input: RGB image :return: transformed feature map\n\n<|skeleton|>\nclass ESPNetX3:\n \"\"\"This class defines the ESPNetX3 network\"\"\"\n\n def __init__(self, classes=19, p=2, q=3, encoderFile=None):\n \"\"\":param classes: number of classes in the dataset. Default is 20 for the cityscapes :param p: depth multiplier :param q: depth multiplier :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNetX3-C and then attached the RUM-based light weight decoder. See paper for more details.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\":param input: RGB image :return: transformed feature map\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.encoder = ESPNetX3_Encoder(classes, p, q)\n if encoderFile != None:\n self.encoder.load_state_dict(torch.load(encoderFile))\n print('Encoder loaded!')\n self.en_modules = []\n for i, m in enumerate(self.encoder.children()):\n self.en_modules.append(m)\n self.level3_C = C(128 + 3, classes, 1, 1)\n self.br = nn.BatchNorm2d(classes, eps=0.001)\n self.conv = CBR(19 + classes, classes, 3, 1)\n self.up_l3 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False))\n self.combine_l2_l3 = nn.Sequential(BR(2 * classes), DilatedParllelResidualBlockB(2 * classes, classes, add=False))\n self.up_l2 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False), BR(classes))\n self.classifier = nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False)\n<|end_body_0|>\n\n<|body_start_1|>\n output0 = self.en_modules[0](input)\n inp1 = self.en_modules[1](input)\n inp2 = self.en_modules[2](input)\n output0_cat = self.en_modules[3](torch.cat([output0, inp1], 1))\n output1_0 = self.en_modules[4](output0_cat)\n for i, layer in enumerate(self.en_modules[5]):\n if i == 0:\n output1 = layer(output1_0)\n else:\n output1 = layer(output1)\n output1_cat = self.en_modules[6](torch.cat([output1, output1_0, inp2], 1))\n output2_0 = self.en_modules[7](output1_cat)\n for i, layer in enumerate(self.en_modules[8]):\n if i == 0:\n output2 = layer(output2_0)\n else:\n output2 = layer(output2)\n output2_cat = self.en_modules[9](torch.cat([output2_0, output2], 1))\n output2_c = self.up_l3(self.br(self.en_modules[10](output2_cat)))\n output1_C = self.level3_C(output1_cat)\n comb_l2_l3 = self.up_l2(self.combine_l2_l3(torch.cat([output1_C, output2_c], 1)))\n concat_features = self.conv(torch.cat([comb_l2_l3, output0_cat], 1))\n classifier = self.classifier(concat_features)\n return classifier\n<|end_body_1|>\n", "revision_id": "27272e43126a507a6d93b21cd2372f5432f61237", "skeleton": "<|skeleton|>\nclass ESPNetX3:\n \"\"\"This class defines the ESPNetX3 network\"\"\"\n\n def __init__(self, classes=19, p=2, q=3, encoderFile=None):\n \"\"\":param classes: number of classes in the dataset. Default is 20 for the cityscapes :param p: depth multiplier :param q: depth multiplier :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNetX3-C and then attached the RUM-based light weight decoder. See paper for more details.\"\"\"\n <|body_0|>\n\n def forward(self, input):\n \"\"\":param input: RGB image :return: transformed feature map\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ESPNetX3:\n \"\"\"This class defines the ESPNetX3 network\"\"\"\n\n def __init__(self, classes=19, p=2, q=3, encoderFile=None):\n \"\"\":param classes: number of classes in the dataset. Default is 20 for the cityscapes :param p: depth multiplier :param q: depth multiplier :param encoderFile: pretrained encoder weights. Recall that we first trained the ESPNetX3-C and then attached the RUM-based light weight decoder. See paper for more details.\"\"\"\n super().__init__()\n self.encoder = ESPNetX3_Encoder(classes, p, q)\n if encoderFile != None:\n self.encoder.load_state_dict(torch.load(encoderFile))\n print('Encoder loaded!')\n self.en_modules = []\n for i, m in enumerate(self.encoder.children()):\n self.en_modules.append(m)\n self.level3_C = C(128 + 3, classes, 1, 1)\n self.br = nn.BatchNorm2d(classes, eps=0.001)\n self.conv = CBR(19 + classes, classes, 3, 1)\n self.up_l3 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False))\n self.combine_l2_l3 = nn.Sequential(BR(2 * classes), DilatedParllelResidualBlockB(2 * classes, classes, add=False))\n self.up_l2 = nn.Sequential(nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False), BR(classes))\n self.classifier = nn.ConvTranspose2d(classes, classes, 2, stride=2, padding=0, output_padding=0, bias=False)\n\n def forward(self, input):\n \"\"\":param input: RGB image :return: transformed feature map\"\"\"\n output0 = self.en_modules[0](input)\n inp1 = self.en_modules[1](input)\n inp2 = self.en_modules[2](input)\n output0_cat = self.en_modules[3](torch.cat([output0, inp1], 1))\n output1_0 = self.en_modules[4](output0_cat)\n for i, layer in enumerate(self.en_modules[5]):\n if i == 0:\n output1 = layer(output1_0)\n else:\n output1 = layer(output1)\n output1_cat = self.en_modules[6](torch.cat([output1, output1_0, inp2], 1))\n output2_0 = self.en_modules[7](output1_cat)\n for i, layer in enumerate(self.en_modules[8]):\n if i == 0:\n output2 = layer(output2_0)\n else:\n output2 = layer(output2)\n output2_cat = self.en_modules[9](torch.cat([output2_0, output2], 1))\n output2_c = self.up_l3(self.br(self.en_modules[10](output2_cat)))\n output1_C = self.level3_C(output1_cat)\n comb_l2_l3 = self.up_l2(self.combine_l2_l3(torch.cat([output1_C, output2_c], 1)))\n concat_features = self.conv(torch.cat([comb_l2_l3, output0_cat], 1))\n classifier = self.classifier(concat_features)\n return classifier\n", "source": "the_stack_v2_python_sparse", "source_path": "model/ESPNetX3.py", "source_repo": "Ethan-ye/Efficient-Segmentation-Networks", "split": "test", "star_events_count": 0} {"blob_id": "a4a4b424a9e28b1cd77c65e00c5ee41b20af15a0", "bodies": ["cluster_attrs = objects.Cluster.get_editable_attributes(cluster)\nneutron_mellanox_data = cluster_attrs.get('neutron_mellanox', {})\nif neutron_mellanox_data:\n storage_data = cluster_attrs.get('storage', {})\n nm = objects.Cluster.get_network_manager(cluster)\n node_attrs['neutron_mellanox'] = {}\n if 'plugin' in neutron_mellanox_data and neutron_mellanox_data['plugin']['value'] == 'ethernet':\n node_attrs['neutron_mellanox'].update({'physical_port': nm.get_network_by_netname('private', networks)['dev'], 'ml2_eswitch': {'vnic_type': 'hostdev', 'apply_profile_patch': True}})\n if 'iser' in storage_data and storage_data['iser']['value']:\n iser_new_name = 'eth_iser0'\n node_attrs['neutron_mellanox'].update({'storage_parent': nm.get_network_by_netname('storage', networks)['dev'], 'iser_interface_name': iser_new_name})\n storage_vlan = nm.get_network_by_netname('storage', networks).get('vlan')\n if storage_vlan:\n vlan_name = 'vlan{0}'.format(storage_vlan)\n node_attrs['network_scheme']['roles']['storage'] = vlan_name\n node_attrs['network_scheme']['interfaces'][vlan_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][vlan_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n node_attrs['network_scheme']['endpoints'][vlan_name]['vlandev'] = iser_new_name\n else:\n node_attrs['network_scheme']['roles']['storage'] = iser_new_name\n node_attrs['network_scheme']['interfaces'][iser_new_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][iser_new_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})", "mellanox_data = cluster_attrs.get('neutron_mellanox')\nif mellanox_data:\n serialized_node['ks_meta'].update({'mlnx_vf_num': mellanox_data.get('vf_num'), 'mlnx_plugin_mode': mellanox_data.get('plugin'), 'mlnx_iser_enabled': cluster_attrs.get('storage', {}).get('iser')})\n pm_data = serialized_node['ks_meta']['pm_data']\n if (mellanox_data['plugin'] == 'ethernet' or cluster_attrs['storage']['iser'] is True) and 'intel_iommu=' not in pm_data['kernel_params']:\n pm_data['kernel_params'] += ' intel_iommu=on'"], "bodies_text": "<|body_start_0|>\n cluster_attrs = objects.Cluster.get_editable_attributes(cluster)\n neutron_mellanox_data = cluster_attrs.get('neutron_mellanox', {})\n if neutron_mellanox_data:\n storage_data = cluster_attrs.get('storage', {})\n nm = objects.Cluster.get_network_manager(cluster)\n node_attrs['neutron_mellanox'] = {}\n if 'plugin' in neutron_mellanox_data and neutron_mellanox_data['plugin']['value'] == 'ethernet':\n node_attrs['neutron_mellanox'].update({'physical_port': nm.get_network_by_netname('private', networks)['dev'], 'ml2_eswitch': {'vnic_type': 'hostdev', 'apply_profile_patch': True}})\n if 'iser' in storage_data and storage_data['iser']['value']:\n iser_new_name = 'eth_iser0'\n node_attrs['neutron_mellanox'].update({'storage_parent': nm.get_network_by_netname('storage', networks)['dev'], 'iser_interface_name': iser_new_name})\n storage_vlan = nm.get_network_by_netname('storage', networks).get('vlan')\n if storage_vlan:\n vlan_name = 'vlan{0}'.format(storage_vlan)\n node_attrs['network_scheme']['roles']['storage'] = vlan_name\n node_attrs['network_scheme']['interfaces'][vlan_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][vlan_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n node_attrs['network_scheme']['endpoints'][vlan_name]['vlandev'] = iser_new_name\n else:\n node_attrs['network_scheme']['roles']['storage'] = iser_new_name\n node_attrs['network_scheme']['interfaces'][iser_new_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][iser_new_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n<|end_body_0|>\n\n<|body_start_1|>\n mellanox_data = cluster_attrs.get('neutron_mellanox')\n if mellanox_data:\n serialized_node['ks_meta'].update({'mlnx_vf_num': mellanox_data.get('vf_num'), 'mlnx_plugin_mode': mellanox_data.get('plugin'), 'mlnx_iser_enabled': cluster_attrs.get('storage', {}).get('iser')})\n pm_data = serialized_node['ks_meta']['pm_data']\n if (mellanox_data['plugin'] == 'ethernet' or cluster_attrs['storage']['iser'] is True) and 'intel_iommu=' not in pm_data['kernel_params']:\n pm_data['kernel_params'] += ' intel_iommu=on'\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MellanoxMixin", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MellanoxMixin:\n\n def inject_mellanox_settings_for_deployment(cls, node_attrs, cluster, networks):\n \"\"\"Mellanox settings for deployment Serialize mellanox node attrs then it will be merged with common attributes, if mellanox plugin or iSER storage enabled. :param node_attrs: attributes for specific node :type node_attrs: dict :param cluster: A cluster instance :type cluster: Cluster model :param networks: networks related for specific node :type networks: list :returns: None\"\"\"\n <|body_0|>\n\n def inject_mellanox_settings_for_provisioning(cls, cluster_attrs, serialized_node):\n \"\"\"Mellanox settings for provisioning Serialize mellanox node attrs then it will be merged with common node attributes :param cluster_attrs: cluster attributes :type cluster_attrs: dict :param serialized_node: node attributes data for provisioning :type serialized_node: dict :returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cluster_attrs = objects.Cluster.get_editable_attributes(cluster)\n neutron_mellanox_data = cluster_attrs.get('neutron_mellanox', {})\n if neutron_mellanox_data:\n storage_data = cluster_attrs.get('storage', {})\n nm = objects.Cluster.get_network_manager(cluster)\n node_attrs['neutron_mellanox'] = {}\n if 'plugin' in neutron_mellanox_data and neutron_mellanox_data['plugin']['value'] == 'ethernet':\n node_attrs['neutron_mellanox'].update({'physical_port': nm.get_network_by_netname('private', networks)['dev'], 'ml2_eswitch': {'vnic_type': 'hostdev', 'apply_profile_patch': True}})\n if 'iser' in storage_data and storage_data['iser']['value']:\n iser_new_name = 'eth_iser0'\n node_attrs['neutron_mellanox'].update({'storage_parent': nm.get_network_by_netname('storage', networks)['dev'], 'iser_interface_name': iser_new_name})\n storage_vlan = nm.get_network_by_netname('storage', networks).get('vlan')\n if storage_vlan:\n vlan_name = 'vlan{0}'.format(storage_vlan)\n node_attrs['network_scheme']['roles']['storage'] = vlan_name\n node_attrs['network_scheme']['interfaces'][vlan_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][vlan_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n node_attrs['network_scheme']['endpoints'][vlan_name]['vlandev'] = iser_new_name\n else:\n node_attrs['network_scheme']['roles']['storage'] = iser_new_name\n node_attrs['network_scheme']['interfaces'][iser_new_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][iser_new_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n<|end_body_0|>\n\n<|body_start_1|>\n mellanox_data = cluster_attrs.get('neutron_mellanox')\n if mellanox_data:\n serialized_node['ks_meta'].update({'mlnx_vf_num': mellanox_data.get('vf_num'), 'mlnx_plugin_mode': mellanox_data.get('plugin'), 'mlnx_iser_enabled': cluster_attrs.get('storage', {}).get('iser')})\n pm_data = serialized_node['ks_meta']['pm_data']\n if (mellanox_data['plugin'] == 'ethernet' or cluster_attrs['storage']['iser'] is True) and 'intel_iommu=' not in pm_data['kernel_params']:\n pm_data['kernel_params'] += ' intel_iommu=on'\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000308", "length_bytes": 6772, "license_type": "permissive", "methods": [{"docstring": "Mellanox settings for deployment Serialize mellanox node attrs then it will be merged with common attributes, if mellanox plugin or iSER storage enabled. :param node_attrs: attributes for specific node :type node_attrs: dict :param cluster: A cluster instance :type cluster: Cluster model :param networks: networks related for specific node :type networks: list :returns: None", "name": "inject_mellanox_settings_for_deployment", "signature": "def inject_mellanox_settings_for_deployment(cls, node_attrs, cluster, networks)"}, {"docstring": "Mellanox settings for provisioning Serialize mellanox node attrs then it will be merged with common node attributes :param cluster_attrs: cluster attributes :type cluster_attrs: dict :param serialized_node: node attributes data for provisioning :type serialized_node: dict :returns: None", "name": "inject_mellanox_settings_for_provisioning", "signature": "def inject_mellanox_settings_for_provisioning(cls, cluster_attrs, serialized_node)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009287", "prompt": "Implement the Python class `MellanoxMixin` described below.\n\nClass description:\nImplement the MellanoxMixin class.\n\nMethod signatures and docstrings:\n- def inject_mellanox_settings_for_deployment(cls, node_attrs, cluster, networks): Mellanox settings for deployment Serialize mellanox node attrs then it will be merged with common attributes, if mellanox plugin or iSER storage enabled. :param node_attrs: attributes for specific node :type node_attrs: dict :param cluster: A cluster instance :type cluster: Cluster model :param networks: networks related for specific node :type networks: list :returns: None\n- def inject_mellanox_settings_for_provisioning(cls, cluster_attrs, serialized_node): Mellanox settings for provisioning Serialize mellanox node attrs then it will be merged with common node attributes :param cluster_attrs: cluster attributes :type cluster_attrs: dict :param serialized_node: node attributes data for provisioning :type serialized_node: dict :returns: None", "prompted_full_text": "Implement the Python class `MellanoxMixin` described below.\n\nClass description:\nImplement the MellanoxMixin class.\n\nMethod signatures and docstrings:\n- def inject_mellanox_settings_for_deployment(cls, node_attrs, cluster, networks): Mellanox settings for deployment Serialize mellanox node attrs then it will be merged with common attributes, if mellanox plugin or iSER storage enabled. :param node_attrs: attributes for specific node :type node_attrs: dict :param cluster: A cluster instance :type cluster: Cluster model :param networks: networks related for specific node :type networks: list :returns: None\n- def inject_mellanox_settings_for_provisioning(cls, cluster_attrs, serialized_node): Mellanox settings for provisioning Serialize mellanox node attrs then it will be merged with common node attributes :param cluster_attrs: cluster attributes :type cluster_attrs: dict :param serialized_node: node attributes data for provisioning :type serialized_node: dict :returns: None\n\n<|skeleton|>\nclass MellanoxMixin:\n\n def inject_mellanox_settings_for_deployment(cls, node_attrs, cluster, networks):\n \"\"\"Mellanox settings for deployment Serialize mellanox node attrs then it will be merged with common attributes, if mellanox plugin or iSER storage enabled. :param node_attrs: attributes for specific node :type node_attrs: dict :param cluster: A cluster instance :type cluster: Cluster model :param networks: networks related for specific node :type networks: list :returns: None\"\"\"\n <|body_0|>\n\n def inject_mellanox_settings_for_provisioning(cls, cluster_attrs, serialized_node):\n \"\"\"Mellanox settings for provisioning Serialize mellanox node attrs then it will be merged with common node attributes :param cluster_attrs: cluster attributes :type cluster_attrs: dict :param serialized_node: node attributes data for provisioning :type serialized_node: dict :returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cluster_attrs = objects.Cluster.get_editable_attributes(cluster)\n neutron_mellanox_data = cluster_attrs.get('neutron_mellanox', {})\n if neutron_mellanox_data:\n storage_data = cluster_attrs.get('storage', {})\n nm = objects.Cluster.get_network_manager(cluster)\n node_attrs['neutron_mellanox'] = {}\n if 'plugin' in neutron_mellanox_data and neutron_mellanox_data['plugin']['value'] == 'ethernet':\n node_attrs['neutron_mellanox'].update({'physical_port': nm.get_network_by_netname('private', networks)['dev'], 'ml2_eswitch': {'vnic_type': 'hostdev', 'apply_profile_patch': True}})\n if 'iser' in storage_data and storage_data['iser']['value']:\n iser_new_name = 'eth_iser0'\n node_attrs['neutron_mellanox'].update({'storage_parent': nm.get_network_by_netname('storage', networks)['dev'], 'iser_interface_name': iser_new_name})\n storage_vlan = nm.get_network_by_netname('storage', networks).get('vlan')\n if storage_vlan:\n vlan_name = 'vlan{0}'.format(storage_vlan)\n node_attrs['network_scheme']['roles']['storage'] = vlan_name\n node_attrs['network_scheme']['interfaces'][vlan_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][vlan_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n node_attrs['network_scheme']['endpoints'][vlan_name]['vlandev'] = iser_new_name\n else:\n node_attrs['network_scheme']['roles']['storage'] = iser_new_name\n node_attrs['network_scheme']['interfaces'][iser_new_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][iser_new_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n<|end_body_0|>\n\n<|body_start_1|>\n mellanox_data = cluster_attrs.get('neutron_mellanox')\n if mellanox_data:\n serialized_node['ks_meta'].update({'mlnx_vf_num': mellanox_data.get('vf_num'), 'mlnx_plugin_mode': mellanox_data.get('plugin'), 'mlnx_iser_enabled': cluster_attrs.get('storage', {}).get('iser')})\n pm_data = serialized_node['ks_meta']['pm_data']\n if (mellanox_data['plugin'] == 'ethernet' or cluster_attrs['storage']['iser'] is True) and 'intel_iommu=' not in pm_data['kernel_params']:\n pm_data['kernel_params'] += ' intel_iommu=on'\n<|end_body_1|>\n", "revision_id": "768ac74a420f822261c4eb8da72f1d8af3c6bbff", "skeleton": "<|skeleton|>\nclass MellanoxMixin:\n\n def inject_mellanox_settings_for_deployment(cls, node_attrs, cluster, networks):\n \"\"\"Mellanox settings for deployment Serialize mellanox node attrs then it will be merged with common attributes, if mellanox plugin or iSER storage enabled. :param node_attrs: attributes for specific node :type node_attrs: dict :param cluster: A cluster instance :type cluster: Cluster model :param networks: networks related for specific node :type networks: list :returns: None\"\"\"\n <|body_0|>\n\n def inject_mellanox_settings_for_provisioning(cls, cluster_attrs, serialized_node):\n \"\"\"Mellanox settings for provisioning Serialize mellanox node attrs then it will be merged with common node attributes :param cluster_attrs: cluster attributes :type cluster_attrs: dict :param serialized_node: node attributes data for provisioning :type serialized_node: dict :returns: None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MellanoxMixin:\n def inject_mellanox_settings_for_deployment(cls, node_attrs, cluster, networks):\n \"\"\"Mellanox settings for deployment Serialize mellanox node attrs then it will be merged with common attributes, if mellanox plugin or iSER storage enabled. :param node_attrs: attributes for specific node :type node_attrs: dict :param cluster: A cluster instance :type cluster: Cluster model :param networks: networks related for specific node :type networks: list :returns: None\"\"\"\n cluster_attrs = objects.Cluster.get_editable_attributes(cluster)\n neutron_mellanox_data = cluster_attrs.get('neutron_mellanox', {})\n if neutron_mellanox_data:\n storage_data = cluster_attrs.get('storage', {})\n nm = objects.Cluster.get_network_manager(cluster)\n node_attrs['neutron_mellanox'] = {}\n if 'plugin' in neutron_mellanox_data and neutron_mellanox_data['plugin']['value'] == 'ethernet':\n node_attrs['neutron_mellanox'].update({'physical_port': nm.get_network_by_netname('private', networks)['dev'], 'ml2_eswitch': {'vnic_type': 'hostdev', 'apply_profile_patch': True}})\n if 'iser' in storage_data and storage_data['iser']['value']:\n iser_new_name = 'eth_iser0'\n node_attrs['neutron_mellanox'].update({'storage_parent': nm.get_network_by_netname('storage', networks)['dev'], 'iser_interface_name': iser_new_name})\n storage_vlan = nm.get_network_by_netname('storage', networks).get('vlan')\n if storage_vlan:\n vlan_name = 'vlan{0}'.format(storage_vlan)\n node_attrs['network_scheme']['roles']['storage'] = vlan_name\n node_attrs['network_scheme']['interfaces'][vlan_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][vlan_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n node_attrs['network_scheme']['endpoints'][vlan_name]['vlandev'] = iser_new_name\n else:\n node_attrs['network_scheme']['roles']['storage'] = iser_new_name\n node_attrs['network_scheme']['interfaces'][iser_new_name] = {'L2': {'vlan_splinters': 'off'}}\n node_attrs['network_scheme']['endpoints'][iser_new_name] = node_attrs['network_scheme']['endpoints'].pop('br-storage', {})\n\n def inject_mellanox_settings_for_provisioning(cls, cluster_attrs, serialized_node):\n \"\"\"Mellanox settings for provisioning Serialize mellanox node attrs then it will be merged with common node attributes :param cluster_attrs: cluster attributes :type cluster_attrs: dict :param serialized_node: node attributes data for provisioning :type serialized_node: dict :returns: None\"\"\"\n mellanox_data = cluster_attrs.get('neutron_mellanox')\n if mellanox_data:\n serialized_node['ks_meta'].update({'mlnx_vf_num': mellanox_data.get('vf_num'), 'mlnx_plugin_mode': mellanox_data.get('plugin'), 'mlnx_iser_enabled': cluster_attrs.get('storage', {}).get('iser')})\n pm_data = serialized_node['ks_meta']['pm_data']\n if (mellanox_data['plugin'] == 'ethernet' or cluster_attrs['storage']['iser'] is True) and 'intel_iommu=' not in pm_data['kernel_params']:\n pm_data['kernel_params'] += ' intel_iommu=on'\n", "source": "the_stack_v2_python_sparse", "source_path": "nailgun/nailgun/orchestrator/base_serializers.py", "source_repo": "dis-xcom/fuel-web", "split": "test", "star_events_count": 0} {"blob_id": "cf2989a1475be7f01e38af99e8c92b541d370bf2", "bodies": ["assert branches, 'At least one branch is required'\nif __debug__:\n for branch in branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\nself.branches = branches\nsuper().__init__(function)", "assert isinstance(calls, list), 'Invalid calls %s' % calls\nassert isinstance(report, IReport), 'Invalid report %s' % report\ntry:\n merge(resolvers, self.contexts)\nexcept:\n raise AssemblyError('Cannot merge contexts at:%s' % locationStack(self.function))\nreport = report.open('Branching processor at:%s' % locationStack(self.function))\nprocessings = []\nfor branch in self.branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n try:\n processing = branch.process(sources, resolvers, extensions, report)\n except:\n raise AssemblyError('Cannot create processing at:%s' % locationStack(self.function))\n assert processing is None or isinstance(processing, Processing), 'Invalid processing %s' % processing\n processings.append(processing)\n\ndef wrapper(*args, **keyargs):\n self.call(*itertools.chain(args, processings), **keyargs)\nupdateWrapper(wrapper, self.call)\ncalls.append(wrapper)", "arguments, annotations = super().processArguments(arguments, annotations)\nn = len(self.branches)\nif len(arguments) >= n:\n return (arguments[n:], annotations)\nraise ProcessorError(\"Required function of form 'def processor(self, [chain], processing, ..., contex:Context ...)' for:%s\" % locationStack(self.function))"], "bodies_text": "<|body_start_0|>\n assert branches, 'At least one branch is required'\n if __debug__:\n for branch in branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n self.branches = branches\n super().__init__(function)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(calls, list), 'Invalid calls %s' % calls\n assert isinstance(report, IReport), 'Invalid report %s' % report\n try:\n merge(resolvers, self.contexts)\n except:\n raise AssemblyError('Cannot merge contexts at:%s' % locationStack(self.function))\n report = report.open('Branching processor at:%s' % locationStack(self.function))\n processings = []\n for branch in self.branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n try:\n processing = branch.process(sources, resolvers, extensions, report)\n except:\n raise AssemblyError('Cannot create processing at:%s' % locationStack(self.function))\n assert processing is None or isinstance(processing, Processing), 'Invalid processing %s' % processing\n processings.append(processing)\n\n def wrapper(*args, **keyargs):\n self.call(*itertools.chain(args, processings), **keyargs)\n updateWrapper(wrapper, self.call)\n calls.append(wrapper)\n<|end_body_1|>\n\n<|body_start_2|>\n arguments, annotations = super().processArguments(arguments, annotations)\n n = len(self.branches)\n if len(arguments) >= n:\n return (arguments[n:], annotations)\n raise ProcessorError(\"Required function of form 'def processor(self, [chain], processing, ..., contex:Context ...)' for:%s\" % locationStack(self.function))\n<|end_body_2|>\n", "class_docstring": "Implementation for @see: IProcessor that provides branching of other processors containers.", "class_name": "Brancher", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Brancher:\n \"\"\"Implementation for @see: IProcessor that provides branching of other processors containers.\"\"\"\n\n def __init__(self, function, *branches):\n \"\"\"Construct the branching processor. @see: Contextual.__init__ @param branches: arguments[IBranch] The branches to use in branching.\"\"\"\n <|body_0|>\n\n def register(self, sources, resolvers, extensions, calls, report):\n \"\"\"@see: IProcessor.register\"\"\"\n <|body_1|>\n\n def processArguments(self, arguments, annotations):\n \"\"\"@see: Contextual.processArguments\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert branches, 'At least one branch is required'\n if __debug__:\n for branch in branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n self.branches = branches\n super().__init__(function)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(calls, list), 'Invalid calls %s' % calls\n assert isinstance(report, IReport), 'Invalid report %s' % report\n try:\n merge(resolvers, self.contexts)\n except:\n raise AssemblyError('Cannot merge contexts at:%s' % locationStack(self.function))\n report = report.open('Branching processor at:%s' % locationStack(self.function))\n processings = []\n for branch in self.branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n try:\n processing = branch.process(sources, resolvers, extensions, report)\n except:\n raise AssemblyError('Cannot create processing at:%s' % locationStack(self.function))\n assert processing is None or isinstance(processing, Processing), 'Invalid processing %s' % processing\n processings.append(processing)\n\n def wrapper(*args, **keyargs):\n self.call(*itertools.chain(args, processings), **keyargs)\n updateWrapper(wrapper, self.call)\n calls.append(wrapper)\n<|end_body_1|>\n\n<|body_start_2|>\n arguments, annotations = super().processArguments(arguments, annotations)\n n = len(self.branches)\n if len(arguments) >= n:\n return (arguments[n:], annotations)\n raise ProcessorError(\"Required function of form 'def processor(self, [chain], processing, ..., contex:Context ...)' for:%s\" % locationStack(self.function))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000309", "length_bytes": 19255, "license_type": "no_license", "methods": [{"docstring": "Construct the branching processor. @see: Contextual.__init__ @param branches: arguments[IBranch] The branches to use in branching.", "name": "__init__", "signature": "def __init__(self, function, *branches)"}, {"docstring": "@see: IProcessor.register", "name": "register", "signature": "def register(self, sources, resolvers, extensions, calls, report)"}, {"docstring": "@see: Contextual.processArguments", "name": "processArguments", "signature": "def processArguments(self, arguments, annotations)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_043309", "prompt": "Implement the Python class `Brancher` described below.\n\nClass description:\nImplementation for @see: IProcessor that provides branching of other processors containers.\n\nMethod signatures and docstrings:\n- def __init__(self, function, *branches): Construct the branching processor. @see: Contextual.__init__ @param branches: arguments[IBranch] The branches to use in branching.\n- def register(self, sources, resolvers, extensions, calls, report): @see: IProcessor.register\n- def processArguments(self, arguments, annotations): @see: Contextual.processArguments", "prompted_full_text": "Implement the Python class `Brancher` described below.\n\nClass description:\nImplementation for @see: IProcessor that provides branching of other processors containers.\n\nMethod signatures and docstrings:\n- def __init__(self, function, *branches): Construct the branching processor. @see: Contextual.__init__ @param branches: arguments[IBranch] The branches to use in branching.\n- def register(self, sources, resolvers, extensions, calls, report): @see: IProcessor.register\n- def processArguments(self, arguments, annotations): @see: Contextual.processArguments\n\n<|skeleton|>\nclass Brancher:\n \"\"\"Implementation for @see: IProcessor that provides branching of other processors containers.\"\"\"\n\n def __init__(self, function, *branches):\n \"\"\"Construct the branching processor. @see: Contextual.__init__ @param branches: arguments[IBranch] The branches to use in branching.\"\"\"\n <|body_0|>\n\n def register(self, sources, resolvers, extensions, calls, report):\n \"\"\"@see: IProcessor.register\"\"\"\n <|body_1|>\n\n def processArguments(self, arguments, annotations):\n \"\"\"@see: Contextual.processArguments\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert branches, 'At least one branch is required'\n if __debug__:\n for branch in branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n self.branches = branches\n super().__init__(function)\n<|end_body_0|>\n\n<|body_start_1|>\n assert isinstance(calls, list), 'Invalid calls %s' % calls\n assert isinstance(report, IReport), 'Invalid report %s' % report\n try:\n merge(resolvers, self.contexts)\n except:\n raise AssemblyError('Cannot merge contexts at:%s' % locationStack(self.function))\n report = report.open('Branching processor at:%s' % locationStack(self.function))\n processings = []\n for branch in self.branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n try:\n processing = branch.process(sources, resolvers, extensions, report)\n except:\n raise AssemblyError('Cannot create processing at:%s' % locationStack(self.function))\n assert processing is None or isinstance(processing, Processing), 'Invalid processing %s' % processing\n processings.append(processing)\n\n def wrapper(*args, **keyargs):\n self.call(*itertools.chain(args, processings), **keyargs)\n updateWrapper(wrapper, self.call)\n calls.append(wrapper)\n<|end_body_1|>\n\n<|body_start_2|>\n arguments, annotations = super().processArguments(arguments, annotations)\n n = len(self.branches)\n if len(arguments) >= n:\n return (arguments[n:], annotations)\n raise ProcessorError(\"Required function of form 'def processor(self, [chain], processing, ..., contex:Context ...)' for:%s\" % locationStack(self.function))\n<|end_body_2|>\n", "revision_id": "e0b3466b34d31548996d57be4a9dac134d904380", "skeleton": "<|skeleton|>\nclass Brancher:\n \"\"\"Implementation for @see: IProcessor that provides branching of other processors containers.\"\"\"\n\n def __init__(self, function, *branches):\n \"\"\"Construct the branching processor. @see: Contextual.__init__ @param branches: arguments[IBranch] The branches to use in branching.\"\"\"\n <|body_0|>\n\n def register(self, sources, resolvers, extensions, calls, report):\n \"\"\"@see: IProcessor.register\"\"\"\n <|body_1|>\n\n def processArguments(self, arguments, annotations):\n \"\"\"@see: Contextual.processArguments\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Brancher:\n \"\"\"Implementation for @see: IProcessor that provides branching of other processors containers.\"\"\"\n\n def __init__(self, function, *branches):\n \"\"\"Construct the branching processor. @see: Contextual.__init__ @param branches: arguments[IBranch] The branches to use in branching.\"\"\"\n assert branches, 'At least one branch is required'\n if __debug__:\n for branch in branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n self.branches = branches\n super().__init__(function)\n\n def register(self, sources, resolvers, extensions, calls, report):\n \"\"\"@see: IProcessor.register\"\"\"\n assert isinstance(calls, list), 'Invalid calls %s' % calls\n assert isinstance(report, IReport), 'Invalid report %s' % report\n try:\n merge(resolvers, self.contexts)\n except:\n raise AssemblyError('Cannot merge contexts at:%s' % locationStack(self.function))\n report = report.open('Branching processor at:%s' % locationStack(self.function))\n processings = []\n for branch in self.branches:\n assert isinstance(branch, IBranch), 'Invalid branch %s' % branch\n try:\n processing = branch.process(sources, resolvers, extensions, report)\n except:\n raise AssemblyError('Cannot create processing at:%s' % locationStack(self.function))\n assert processing is None or isinstance(processing, Processing), 'Invalid processing %s' % processing\n processings.append(processing)\n\n def wrapper(*args, **keyargs):\n self.call(*itertools.chain(args, processings), **keyargs)\n updateWrapper(wrapper, self.call)\n calls.append(wrapper)\n\n def processArguments(self, arguments, annotations):\n \"\"\"@see: Contextual.processArguments\"\"\"\n arguments, annotations = super().processArguments(arguments, annotations)\n n = len(self.branches)\n if len(arguments) >= n:\n return (arguments[n:], annotations)\n raise ProcessorError(\"Required function of form 'def processor(self, [chain], processing, ..., contex:Context ...)' for:%s\" % locationStack(self.function))\n", "source": "the_stack_v2_python_sparse", "source_path": "components/ally/ally/design/processor/processor.py", "source_repo": "cristidomsa/Ally-Py", "split": "test", "star_events_count": 0} {"blob_id": "b859e66503c61efd0f8e98519c9b0474425c6400", "bodies": ["self.cbb = DpFile(cbb_file)\nself.wbb = DpFile(wbb_file)\nself.sam = DpFile(sam_file)\nself.plate = plate\nself.model = self.sam.model\nif not dwr_file == None:\n self.dwr = DpFile(dwr_file)\nelse:\n self.dwr = None\nself._calibrate_measurement()", "cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15, self.cbb.data.wavelength)\nwarm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15, self.wbb.data.wavelength)\nself.wbb.data.average_spectrum[0] = 1\nself.wbb.data.average_spectrum[2047] = 1\ncalibration_slope = (warm_blackbody - cold_blackbody) / (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum)\ncalibration_offset = warm_blackbody - self.wbb.data.average_spectrum * calibration_slope\nself.wbb.calibrate_file(calibration_slope, calibration_offset)\nself.cbb.calibrate_file(calibration_slope, calibration_offset)\nself.sam.calibrate_file(calibration_slope, calibration_offset)\nif not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n plate_temperature = self.dwr.header.spare_f[0]\n if self.plate == -1:\n plate_emissivity = self.dwr.header.spare_f[1]\n plate_blackbody = bb_radiance(plate_temperature + 273.15, self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n self.dwr.data.average_spectrum = (self.dwr.data.average_spectrum - plate_emission) / (1 - plate_emissivity)", "errors = []\nerrors.append(self.wbb.check_file(lower_wave, upper_wave))\nerrors.append(self.cbb.check_file(lower_wave, upper_wave))\nerrors.append(self.sam.check_file(lower_wave, upper_wave))\nif not self.dwr is None:\n errors.append(self.dwr.check_file(lower_wave, upper_wave))\nerrors = np.array(errors)\nif errors.max() > tolerance:\n return False\nelse:\n return True"], "bodies_text": "<|body_start_0|>\n self.cbb = DpFile(cbb_file)\n self.wbb = DpFile(wbb_file)\n self.sam = DpFile(sam_file)\n self.plate = plate\n self.model = self.sam.model\n if not dwr_file == None:\n self.dwr = DpFile(dwr_file)\n else:\n self.dwr = None\n self._calibrate_measurement()\n<|end_body_0|>\n\n<|body_start_1|>\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15, self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15, self.wbb.data.wavelength)\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n calibration_slope = (warm_blackbody - cold_blackbody) / (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum)\n calibration_offset = warm_blackbody - self.wbb.data.average_spectrum * calibration_slope\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n plate_temperature = self.dwr.header.spare_f[0]\n if self.plate == -1:\n plate_emissivity = self.dwr.header.spare_f[1]\n plate_blackbody = bb_radiance(plate_temperature + 273.15, self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n self.dwr.data.average_spectrum = (self.dwr.data.average_spectrum - plate_emission) / (1 - plate_emissivity)\n<|end_body_1|>\n\n<|body_start_2|>\n errors = []\n errors.append(self.wbb.check_file(lower_wave, upper_wave))\n errors.append(self.cbb.check_file(lower_wave, upper_wave))\n errors.append(self.sam.check_file(lower_wave, upper_wave))\n if not self.dwr is None:\n errors.append(self.dwr.check_file(lower_wave, upper_wave))\n errors = np.array(errors)\n if errors.max() > tolerance:\n return False\n else:\n return True\n<|end_body_2|>\n", "class_docstring": "A class that holds the information relavent to a complete measurement taken with a D&P Instruments Model 103F MicroFT or Model 202 TurboFT. Attributes: cbb - A DpFile instance holding the cold blackbody information. wbb - A DpFile instance holding the warm blackbody information. sam - A DpFile instance holding the sample information dwr - A DpFile instance holding the downwelling information, or None.", "class_name": "DpMeasurement", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DpMeasurement:\n \"\"\"A class that holds the information relavent to a complete measurement taken with a D&P Instruments Model 103F MicroFT or Model 202 TurboFT. Attributes: cbb - A DpFile instance holding the cold blackbody information. wbb - A DpFile instance holding the warm blackbody information. sam - A DpFile instance holding the sample information dwr - A DpFile instance holding the downwelling information, or None.\"\"\"\n\n def __init__(self, cbb_file, wbb_file, sam_file, dwr_file=None, plate=-1):\n \"\"\"DpMeasurement instance constructor. Arguments: cbb_file - The cold blackbody filename. wbb_file - The warm blackbody filename. sam_file - The sample filename. dwr_file - The downwelling filename. plate - The plate emissivity.\"\"\"\n <|body_0|>\n\n def _calibrate_measurement(self):\n \"\"\"Calibrate the data for a measurement.\"\"\"\n <|body_1|>\n\n def check_consistency(self, lower_wave, upper_wave, tolerance):\n \"\"\"Check the consistency of the measurement scans. Arguments: lower_wave - The lower wavelength limit to use for the check. upper_wave - The upper wavelength limit to use for the check. tolerance - The minimum acceptable consistency tolerance. Returns: True if the consistency is greater than the specified minimum allowed tolerance, False otherwise.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cbb = DpFile(cbb_file)\n self.wbb = DpFile(wbb_file)\n self.sam = DpFile(sam_file)\n self.plate = plate\n self.model = self.sam.model\n if not dwr_file == None:\n self.dwr = DpFile(dwr_file)\n else:\n self.dwr = None\n self._calibrate_measurement()\n<|end_body_0|>\n\n<|body_start_1|>\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15, self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15, self.wbb.data.wavelength)\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n calibration_slope = (warm_blackbody - cold_blackbody) / (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum)\n calibration_offset = warm_blackbody - self.wbb.data.average_spectrum * calibration_slope\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n plate_temperature = self.dwr.header.spare_f[0]\n if self.plate == -1:\n plate_emissivity = self.dwr.header.spare_f[1]\n plate_blackbody = bb_radiance(plate_temperature + 273.15, self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n self.dwr.data.average_spectrum = (self.dwr.data.average_spectrum - plate_emission) / (1 - plate_emissivity)\n<|end_body_1|>\n\n<|body_start_2|>\n errors = []\n errors.append(self.wbb.check_file(lower_wave, upper_wave))\n errors.append(self.cbb.check_file(lower_wave, upper_wave))\n errors.append(self.sam.check_file(lower_wave, upper_wave))\n if not self.dwr is None:\n errors.append(self.dwr.check_file(lower_wave, upper_wave))\n errors = np.array(errors)\n if errors.max() > tolerance:\n return False\n else:\n return True\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000310", "length_bytes": 4023, "license_type": "no_license", "methods": [{"docstring": "DpMeasurement instance constructor. Arguments: cbb_file - The cold blackbody filename. wbb_file - The warm blackbody filename. sam_file - The sample filename. dwr_file - The downwelling filename. plate - The plate emissivity.", "name": "__init__", "signature": "def __init__(self, cbb_file, wbb_file, sam_file, dwr_file=None, plate=-1)"}, {"docstring": "Calibrate the data for a measurement.", "name": "_calibrate_measurement", "signature": "def _calibrate_measurement(self)"}, {"docstring": "Check the consistency of the measurement scans. Arguments: lower_wave - The lower wavelength limit to use for the check. upper_wave - The upper wavelength limit to use for the check. tolerance - The minimum acceptable consistency tolerance. Returns: True if the consistency is greater than the specified minimum allowed tolerance, False otherwise.", "name": "check_consistency", "signature": "def check_consistency(self, lower_wave, upper_wave, tolerance)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_004117", "prompt": "Implement the Python class `DpMeasurement` described below.\n\nClass description:\nA class that holds the information relavent to a complete measurement taken with a D&P Instruments Model 103F MicroFT or Model 202 TurboFT. Attributes: cbb - A DpFile instance holding the cold blackbody information. wbb - A DpFile instance holding the warm blackbody information. sam - A DpFile instance holding the sample information dwr - A DpFile instance holding the downwelling information, or None.\n\nMethod signatures and docstrings:\n- def __init__(self, cbb_file, wbb_file, sam_file, dwr_file=None, plate=-1): DpMeasurement instance constructor. Arguments: cbb_file - The cold blackbody filename. wbb_file - The warm blackbody filename. sam_file - The sample filename. dwr_file - The downwelling filename. plate - The plate emissivity.\n- def _calibrate_measurement(self): Calibrate the data for a measurement.\n- def check_consistency(self, lower_wave, upper_wave, tolerance): Check the consistency of the measurement scans. Arguments: lower_wave - The lower wavelength limit to use for the check. upper_wave - The upper wavelength limit to use for the check. tolerance - The minimum acceptable consistency tolerance. Returns: True if the consistency is greater than the specified minimum allowed tolerance, False otherwise.", "prompted_full_text": "Implement the Python class `DpMeasurement` described below.\n\nClass description:\nA class that holds the information relavent to a complete measurement taken with a D&P Instruments Model 103F MicroFT or Model 202 TurboFT. Attributes: cbb - A DpFile instance holding the cold blackbody information. wbb - A DpFile instance holding the warm blackbody information. sam - A DpFile instance holding the sample information dwr - A DpFile instance holding the downwelling information, or None.\n\nMethod signatures and docstrings:\n- def __init__(self, cbb_file, wbb_file, sam_file, dwr_file=None, plate=-1): DpMeasurement instance constructor. Arguments: cbb_file - The cold blackbody filename. wbb_file - The warm blackbody filename. sam_file - The sample filename. dwr_file - The downwelling filename. plate - The plate emissivity.\n- def _calibrate_measurement(self): Calibrate the data for a measurement.\n- def check_consistency(self, lower_wave, upper_wave, tolerance): Check the consistency of the measurement scans. Arguments: lower_wave - The lower wavelength limit to use for the check. upper_wave - The upper wavelength limit to use for the check. tolerance - The minimum acceptable consistency tolerance. Returns: True if the consistency is greater than the specified minimum allowed tolerance, False otherwise.\n\n<|skeleton|>\nclass DpMeasurement:\n \"\"\"A class that holds the information relavent to a complete measurement taken with a D&P Instruments Model 103F MicroFT or Model 202 TurboFT. Attributes: cbb - A DpFile instance holding the cold blackbody information. wbb - A DpFile instance holding the warm blackbody information. sam - A DpFile instance holding the sample information dwr - A DpFile instance holding the downwelling information, or None.\"\"\"\n\n def __init__(self, cbb_file, wbb_file, sam_file, dwr_file=None, plate=-1):\n \"\"\"DpMeasurement instance constructor. Arguments: cbb_file - The cold blackbody filename. wbb_file - The warm blackbody filename. sam_file - The sample filename. dwr_file - The downwelling filename. plate - The plate emissivity.\"\"\"\n <|body_0|>\n\n def _calibrate_measurement(self):\n \"\"\"Calibrate the data for a measurement.\"\"\"\n <|body_1|>\n\n def check_consistency(self, lower_wave, upper_wave, tolerance):\n \"\"\"Check the consistency of the measurement scans. Arguments: lower_wave - The lower wavelength limit to use for the check. upper_wave - The upper wavelength limit to use for the check. tolerance - The minimum acceptable consistency tolerance. Returns: True if the consistency is greater than the specified minimum allowed tolerance, False otherwise.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cbb = DpFile(cbb_file)\n self.wbb = DpFile(wbb_file)\n self.sam = DpFile(sam_file)\n self.plate = plate\n self.model = self.sam.model\n if not dwr_file == None:\n self.dwr = DpFile(dwr_file)\n else:\n self.dwr = None\n self._calibrate_measurement()\n<|end_body_0|>\n\n<|body_start_1|>\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15, self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15, self.wbb.data.wavelength)\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n calibration_slope = (warm_blackbody - cold_blackbody) / (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum)\n calibration_offset = warm_blackbody - self.wbb.data.average_spectrum * calibration_slope\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n plate_temperature = self.dwr.header.spare_f[0]\n if self.plate == -1:\n plate_emissivity = self.dwr.header.spare_f[1]\n plate_blackbody = bb_radiance(plate_temperature + 273.15, self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n self.dwr.data.average_spectrum = (self.dwr.data.average_spectrum - plate_emission) / (1 - plate_emissivity)\n<|end_body_1|>\n\n<|body_start_2|>\n errors = []\n errors.append(self.wbb.check_file(lower_wave, upper_wave))\n errors.append(self.cbb.check_file(lower_wave, upper_wave))\n errors.append(self.sam.check_file(lower_wave, upper_wave))\n if not self.dwr is None:\n errors.append(self.dwr.check_file(lower_wave, upper_wave))\n errors = np.array(errors)\n if errors.max() > tolerance:\n return False\n else:\n return True\n<|end_body_2|>\n", "revision_id": "743167940f700374755ea273b90da66befae1ba4", "skeleton": "<|skeleton|>\nclass DpMeasurement:\n \"\"\"A class that holds the information relavent to a complete measurement taken with a D&P Instruments Model 103F MicroFT or Model 202 TurboFT. Attributes: cbb - A DpFile instance holding the cold blackbody information. wbb - A DpFile instance holding the warm blackbody information. sam - A DpFile instance holding the sample information dwr - A DpFile instance holding the downwelling information, or None.\"\"\"\n\n def __init__(self, cbb_file, wbb_file, sam_file, dwr_file=None, plate=-1):\n \"\"\"DpMeasurement instance constructor. Arguments: cbb_file - The cold blackbody filename. wbb_file - The warm blackbody filename. sam_file - The sample filename. dwr_file - The downwelling filename. plate - The plate emissivity.\"\"\"\n <|body_0|>\n\n def _calibrate_measurement(self):\n \"\"\"Calibrate the data for a measurement.\"\"\"\n <|body_1|>\n\n def check_consistency(self, lower_wave, upper_wave, tolerance):\n \"\"\"Check the consistency of the measurement scans. Arguments: lower_wave - The lower wavelength limit to use for the check. upper_wave - The upper wavelength limit to use for the check. tolerance - The minimum acceptable consistency tolerance. Returns: True if the consistency is greater than the specified minimum allowed tolerance, False otherwise.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DpMeasurement:\n \"\"\"A class that holds the information relavent to a complete measurement taken with a D&P Instruments Model 103F MicroFT or Model 202 TurboFT. Attributes: cbb - A DpFile instance holding the cold blackbody information. wbb - A DpFile instance holding the warm blackbody information. sam - A DpFile instance holding the sample information dwr - A DpFile instance holding the downwelling information, or None.\"\"\"\n\n def __init__(self, cbb_file, wbb_file, sam_file, dwr_file=None, plate=-1):\n \"\"\"DpMeasurement instance constructor. Arguments: cbb_file - The cold blackbody filename. wbb_file - The warm blackbody filename. sam_file - The sample filename. dwr_file - The downwelling filename. plate - The plate emissivity.\"\"\"\n self.cbb = DpFile(cbb_file)\n self.wbb = DpFile(wbb_file)\n self.sam = DpFile(sam_file)\n self.plate = plate\n self.model = self.sam.model\n if not dwr_file == None:\n self.dwr = DpFile(dwr_file)\n else:\n self.dwr = None\n self._calibrate_measurement()\n\n def _calibrate_measurement(self):\n \"\"\"Calibrate the data for a measurement.\"\"\"\n cold_blackbody = bb_radiance(self.cbb.header.cbb_temperature + 273.15, self.cbb.data.wavelength)\n warm_blackbody = bb_radiance(self.wbb.header.wbb_temperature + 273.15, self.wbb.data.wavelength)\n self.wbb.data.average_spectrum[0] = 1\n self.wbb.data.average_spectrum[2047] = 1\n calibration_slope = (warm_blackbody - cold_blackbody) / (self.wbb.data.average_spectrum - self.cbb.data.average_spectrum)\n calibration_offset = warm_blackbody - self.wbb.data.average_spectrum * calibration_slope\n self.wbb.calibrate_file(calibration_slope, calibration_offset)\n self.cbb.calibrate_file(calibration_slope, calibration_offset)\n self.sam.calibrate_file(calibration_slope, calibration_offset)\n if not self.dwr is None:\n self.dwr.calibrate_file(calibration_slope, calibration_offset)\n plate_temperature = self.dwr.header.spare_f[0]\n if self.plate == -1:\n plate_emissivity = self.dwr.header.spare_f[1]\n plate_blackbody = bb_radiance(plate_temperature + 273.15, self.dwr.data.wavelength)\n plate_emission = plate_emissivity * plate_blackbody\n self.dwr.data.average_spectrum = (self.dwr.data.average_spectrum - plate_emission) / (1 - plate_emissivity)\n\n def check_consistency(self, lower_wave, upper_wave, tolerance):\n \"\"\"Check the consistency of the measurement scans. Arguments: lower_wave - The lower wavelength limit to use for the check. upper_wave - The upper wavelength limit to use for the check. tolerance - The minimum acceptable consistency tolerance. Returns: True if the consistency is greater than the specified minimum allowed tolerance, False otherwise.\"\"\"\n errors = []\n errors.append(self.wbb.check_file(lower_wave, upper_wave))\n errors.append(self.cbb.check_file(lower_wave, upper_wave))\n errors.append(self.sam.check_file(lower_wave, upper_wave))\n if not self.dwr is None:\n errors.append(self.dwr.check_file(lower_wave, upper_wave))\n errors = np.array(errors)\n if errors.max() > tolerance:\n return False\n else:\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "tes/models/dp_models/dp_measurement.py", "source_repo": "max19951001/TES", "split": "test", "star_events_count": 0} {"blob_id": "82bb39dbb7391161dd61f37312a2e56b4923b0f9", "bodies": ["is_cloud_admin = self.helper.is_user_cloud_admin()\napps_user_is_admin_on = self.helper.get_owned_apps()\napp_name = self.request.get('appid')\nif not is_cloud_admin and app_name not in apps_user_is_admin_on:\n response = json.dumps({'error': True, 'message': 'Not authorized'})\n self.response.out.write(response)\n return\nquery = KindStat.all(_app=app_name)\ntime_stamp = datetime.datetime.now() - datetime.timedelta(days=self.MAX_DAYS_BACK)\nquery.filter('timestamp >', time_stamp)\nitems = query.fetch(self.MAX_KIND_STATS)\nresponse = self.convert_to_json(items)\nself.response.out.write(response)\nreturn", "items = []\nfor ent in kind_entities:\n items.append({time.mktime(ent.timestamp.timetuple()): {ent.kind_name: {'bytes': ent.bytes, 'count': ent.count}}})\nreturn json.dumps(items)"], "bodies_text": "<|body_start_0|>\n is_cloud_admin = self.helper.is_user_cloud_admin()\n apps_user_is_admin_on = self.helper.get_owned_apps()\n app_name = self.request.get('appid')\n if not is_cloud_admin and app_name not in apps_user_is_admin_on:\n response = json.dumps({'error': True, 'message': 'Not authorized'})\n self.response.out.write(response)\n return\n query = KindStat.all(_app=app_name)\n time_stamp = datetime.datetime.now() - datetime.timedelta(days=self.MAX_DAYS_BACK)\n query.filter('timestamp >', time_stamp)\n items = query.fetch(self.MAX_KIND_STATS)\n response = self.convert_to_json(items)\n self.response.out.write(response)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n items = []\n for ent in kind_entities:\n items.append({time.mktime(ent.timestamp.timetuple()): {ent.kind_name: {'bytes': ent.bytes, 'count': ent.count}}})\n return json.dumps(items)\n<|end_body_1|>\n", "class_docstring": "Class that returns datastore statistics in JSON such as the number of a certain entity kind and the amount of total bytes.", "class_name": "DatastoreStats", "detected_licenses": ["Apache-2.0", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DatastoreStats:\n \"\"\"Class that returns datastore statistics in JSON such as the number of a certain entity kind and the amount of total bytes.\"\"\"\n\n def get(self):\n \"\"\"Handler for GET request for the datastore statistics. Returns: The JSON output for testing.\"\"\"\n <|body_0|>\n\n def convert_to_json(self, kind_entities):\n \"\"\"Converts KindStat entities to a json string. Args: kind_entities: A list of stats.KindStat. Returns: A JSON string containing kind statistic information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_cloud_admin = self.helper.is_user_cloud_admin()\n apps_user_is_admin_on = self.helper.get_owned_apps()\n app_name = self.request.get('appid')\n if not is_cloud_admin and app_name not in apps_user_is_admin_on:\n response = json.dumps({'error': True, 'message': 'Not authorized'})\n self.response.out.write(response)\n return\n query = KindStat.all(_app=app_name)\n time_stamp = datetime.datetime.now() - datetime.timedelta(days=self.MAX_DAYS_BACK)\n query.filter('timestamp >', time_stamp)\n items = query.fetch(self.MAX_KIND_STATS)\n response = self.convert_to_json(items)\n self.response.out.write(response)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n items = []\n for ent in kind_entities:\n items.append({time.mktime(ent.timestamp.timetuple()): {ent.kind_name: {'bytes': ent.bytes, 'count': ent.count}}})\n return json.dumps(items)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000311", "length_bytes": 37207, "license_type": "permissive", "methods": [{"docstring": "Handler for GET request for the datastore statistics. Returns: The JSON output for testing.", "name": "get", "signature": "def get(self)"}, {"docstring": "Converts KindStat entities to a json string. Args: kind_entities: A list of stats.KindStat. Returns: A JSON string containing kind statistic information.", "name": "convert_to_json", "signature": "def convert_to_json(self, kind_entities)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_008834", "prompt": "Implement the Python class `DatastoreStats` described below.\n\nClass description:\nClass that returns datastore statistics in JSON such as the number of a certain entity kind and the amount of total bytes.\n\nMethod signatures and docstrings:\n- def get(self): Handler for GET request for the datastore statistics. Returns: The JSON output for testing.\n- def convert_to_json(self, kind_entities): Converts KindStat entities to a json string. Args: kind_entities: A list of stats.KindStat. Returns: A JSON string containing kind statistic information.", "prompted_full_text": "Implement the Python class `DatastoreStats` described below.\n\nClass description:\nClass that returns datastore statistics in JSON such as the number of a certain entity kind and the amount of total bytes.\n\nMethod signatures and docstrings:\n- def get(self): Handler for GET request for the datastore statistics. Returns: The JSON output for testing.\n- def convert_to_json(self, kind_entities): Converts KindStat entities to a json string. Args: kind_entities: A list of stats.KindStat. Returns: A JSON string containing kind statistic information.\n\n<|skeleton|>\nclass DatastoreStats:\n \"\"\"Class that returns datastore statistics in JSON such as the number of a certain entity kind and the amount of total bytes.\"\"\"\n\n def get(self):\n \"\"\"Handler for GET request for the datastore statistics. Returns: The JSON output for testing.\"\"\"\n <|body_0|>\n\n def convert_to_json(self, kind_entities):\n \"\"\"Converts KindStat entities to a json string. Args: kind_entities: A list of stats.KindStat. Returns: A JSON string containing kind statistic information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n is_cloud_admin = self.helper.is_user_cloud_admin()\n apps_user_is_admin_on = self.helper.get_owned_apps()\n app_name = self.request.get('appid')\n if not is_cloud_admin and app_name not in apps_user_is_admin_on:\n response = json.dumps({'error': True, 'message': 'Not authorized'})\n self.response.out.write(response)\n return\n query = KindStat.all(_app=app_name)\n time_stamp = datetime.datetime.now() - datetime.timedelta(days=self.MAX_DAYS_BACK)\n query.filter('timestamp >', time_stamp)\n items = query.fetch(self.MAX_KIND_STATS)\n response = self.convert_to_json(items)\n self.response.out.write(response)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n items = []\n for ent in kind_entities:\n items.append({time.mktime(ent.timestamp.timetuple()): {ent.kind_name: {'bytes': ent.bytes, 'count': ent.count}}})\n return json.dumps(items)\n<|end_body_1|>\n", "revision_id": "aa36e8dfaa295d53bec616ed07f91ec8c02fa4e1", "skeleton": "<|skeleton|>\nclass DatastoreStats:\n \"\"\"Class that returns datastore statistics in JSON such as the number of a certain entity kind and the amount of total bytes.\"\"\"\n\n def get(self):\n \"\"\"Handler for GET request for the datastore statistics. Returns: The JSON output for testing.\"\"\"\n <|body_0|>\n\n def convert_to_json(self, kind_entities):\n \"\"\"Converts KindStat entities to a json string. Args: kind_entities: A list of stats.KindStat. Returns: A JSON string containing kind statistic information.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DatastoreStats:\n \"\"\"Class that returns datastore statistics in JSON such as the number of a certain entity kind and the amount of total bytes.\"\"\"\n\n def get(self):\n \"\"\"Handler for GET request for the datastore statistics. Returns: The JSON output for testing.\"\"\"\n is_cloud_admin = self.helper.is_user_cloud_admin()\n apps_user_is_admin_on = self.helper.get_owned_apps()\n app_name = self.request.get('appid')\n if not is_cloud_admin and app_name not in apps_user_is_admin_on:\n response = json.dumps({'error': True, 'message': 'Not authorized'})\n self.response.out.write(response)\n return\n query = KindStat.all(_app=app_name)\n time_stamp = datetime.datetime.now() - datetime.timedelta(days=self.MAX_DAYS_BACK)\n query.filter('timestamp >', time_stamp)\n items = query.fetch(self.MAX_KIND_STATS)\n response = self.convert_to_json(items)\n self.response.out.write(response)\n return\n\n def convert_to_json(self, kind_entities):\n \"\"\"Converts KindStat entities to a json string. Args: kind_entities: A list of stats.KindStat. Returns: A JSON string containing kind statistic information.\"\"\"\n items = []\n for ent in kind_entities:\n items.append({time.mktime(ent.timestamp.timetuple()): {ent.kind_name: {'bytes': ent.bytes, 'count': ent.count}}})\n return json.dumps(items)\n", "source": "the_stack_v2_python_sparse", "source_path": "AppDashboard/dashboard.py", "source_repo": "shatterednirvana/appscale", "split": "test", "star_events_count": 6} {"blob_id": "c8b00059b1df5aeb6eee78c16eaf76442ecb53c8", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "class_docstring": "Missing associated documentation comment in .proto file.", "class_name": "TodoServiceServicer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TodoServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def CreateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def GetTodos(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def UpdateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def DeleteTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000312", "length_bytes": 6980, "license_type": "no_license", "methods": [{"docstring": "Missing associated documentation comment in .proto file.", "name": "CreateTodo", "signature": "def CreateTodo(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "GetTodos", "signature": "def GetTodos(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "UpdateTodo", "signature": "def UpdateTodo(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "DeleteTodo", "signature": "def DeleteTodo(self, request, context)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_030007", "prompt": "Implement the Python class `TodoServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def CreateTodo(self, request, context): Missing associated documentation comment in .proto file.\n- def GetTodos(self, request, context): Missing associated documentation comment in .proto file.\n- def UpdateTodo(self, request, context): Missing associated documentation comment in .proto file.\n- def DeleteTodo(self, request, context): Missing associated documentation comment in .proto file.", "prompted_full_text": "Implement the Python class `TodoServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def CreateTodo(self, request, context): Missing associated documentation comment in .proto file.\n- def GetTodos(self, request, context): Missing associated documentation comment in .proto file.\n- def UpdateTodo(self, request, context): Missing associated documentation comment in .proto file.\n- def DeleteTodo(self, request, context): Missing associated documentation comment in .proto file.\n\n<|skeleton|>\nclass TodoServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def CreateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def GetTodos(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def UpdateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def DeleteTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "revision_id": "f2f5418d8a7674e8f91de443b3bf72a419589f9f", "skeleton": "<|skeleton|>\nclass TodoServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def CreateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def GetTodos(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def UpdateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def DeleteTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TodoServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def CreateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetTodos(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteTodo(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "grpc-main-service/todo/todo_pb2_grpc.py", "source_repo": "Jprichard314/python-workbook", "split": "test", "star_events_count": 0} {"blob_id": "e21be80cf04e9ace8d262e9acf0e57f2ba72f4fd", "bodies": ["try:\n from pymc.model import Model\n model = Model.get_context()\nexcept TypeError:\n raise TypeError(\"No model on context stack, which is needed to instantiate distributions. Add variable inside a 'with model:' block, or use the '.dist' syntax for a standalone distribution.\")\nif 'testval' in kwargs:\n initval = kwargs.pop('testval')\n warnings.warn('The `testval` argument is deprecated; use `initval`.', FutureWarning, stacklevel=2)\nif not isinstance(name, string_types):\n raise TypeError(f'Name needs to be a string but got: {name}')\ndims = convert_dims(dims)\nif observed is not None:\n observed = convert_observed_data(observed)\nif kwargs.get('size') is None and kwargs.get('shape') is None:\n if dims is not None:\n kwargs['shape'] = shape_from_dims(dims, model)\n elif observed is not None:\n kwargs['shape'] = tuple(observed.shape)\nrv_out = cls.dist(*args, **kwargs)\nrv_out = model.register_rv(rv_out, name, observed, total_size, dims=dims, transform=transform, initval=initval)\nrv_out.str_repr = types.MethodType(str_for_dist, rv_out)\nrv_out._repr_latex_ = types.MethodType(functools.partial(str_for_dist, formatting='latex'), rv_out)\nrv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\nrv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\nrv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\nreturn rv_out", "if 'testval' in kwargs:\n kwargs.pop('testval')\n warnings.warn(\"The `.dist(testval=...)` argument is deprecated and has no effect. Initial values for sampling/optimization can be specified with `initval` in a modelcontext. For using PyTensor's test value features, you must assign the `.tag.test_value` yourself.\", FutureWarning, stacklevel=2)\nif 'initval' in kwargs:\n raise TypeError('Unexpected keyword argument `initval`. This argument is not available for the `.dist()` API.')\nif 'dims' in kwargs:\n raise NotImplementedError('The use of a `.dist(dims=...)` API is not supported.')\nsize = kwargs.pop('size', None)\nif shape is not None and size is not None:\n raise ValueError(f'Passing both `shape` ({shape}) and `size` ({size}) is not supported!')\nshape = convert_shape(shape)\nsize = convert_size(size)\nndim_supp = getattr(cls.rv_op, 'ndim_supp', None)\nif ndim_supp is None:\n ndim_supp = cls.rv_op(*dist_params, **kwargs).owner.op.ndim_supp\ncreate_size = find_size(shape=shape, size=size, ndim_supp=ndim_supp)\nrv_out = cls.rv_op(*dist_params, size=create_size, **kwargs)\nrv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\nrv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\nrv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n_add_future_warning_tag(rv_out)\nreturn rv_out"], "bodies_text": "<|body_start_0|>\n try:\n from pymc.model import Model\n model = Model.get_context()\n except TypeError:\n raise TypeError(\"No model on context stack, which is needed to instantiate distributions. Add variable inside a 'with model:' block, or use the '.dist' syntax for a standalone distribution.\")\n if 'testval' in kwargs:\n initval = kwargs.pop('testval')\n warnings.warn('The `testval` argument is deprecated; use `initval`.', FutureWarning, stacklevel=2)\n if not isinstance(name, string_types):\n raise TypeError(f'Name needs to be a string but got: {name}')\n dims = convert_dims(dims)\n if observed is not None:\n observed = convert_observed_data(observed)\n if kwargs.get('size') is None and kwargs.get('shape') is None:\n if dims is not None:\n kwargs['shape'] = shape_from_dims(dims, model)\n elif observed is not None:\n kwargs['shape'] = tuple(observed.shape)\n rv_out = cls.dist(*args, **kwargs)\n rv_out = model.register_rv(rv_out, name, observed, total_size, dims=dims, transform=transform, initval=initval)\n rv_out.str_repr = types.MethodType(str_for_dist, rv_out)\n rv_out._repr_latex_ = types.MethodType(functools.partial(str_for_dist, formatting='latex'), rv_out)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n return rv_out\n<|end_body_0|>\n\n<|body_start_1|>\n if 'testval' in kwargs:\n kwargs.pop('testval')\n warnings.warn(\"The `.dist(testval=...)` argument is deprecated and has no effect. Initial values for sampling/optimization can be specified with `initval` in a modelcontext. For using PyTensor's test value features, you must assign the `.tag.test_value` yourself.\", FutureWarning, stacklevel=2)\n if 'initval' in kwargs:\n raise TypeError('Unexpected keyword argument `initval`. This argument is not available for the `.dist()` API.')\n if 'dims' in kwargs:\n raise NotImplementedError('The use of a `.dist(dims=...)` API is not supported.')\n size = kwargs.pop('size', None)\n if shape is not None and size is not None:\n raise ValueError(f'Passing both `shape` ({shape}) and `size` ({size}) is not supported!')\n shape = convert_shape(shape)\n size = convert_size(size)\n ndim_supp = getattr(cls.rv_op, 'ndim_supp', None)\n if ndim_supp is None:\n ndim_supp = cls.rv_op(*dist_params, **kwargs).owner.op.ndim_supp\n create_size = find_size(shape=shape, size=size, ndim_supp=ndim_supp)\n rv_out = cls.rv_op(*dist_params, size=create_size, **kwargs)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n _add_future_warning_tag(rv_out)\n return rv_out\n<|end_body_1|>\n", "class_docstring": "Statistical distribution", "class_name": "Distribution", "detected_licenses": ["Apache-2.0", "AFL-2.1", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Distribution:\n \"\"\"Statistical distribution\"\"\"\n\n def __new__(cls, name: str, *args, rng=None, dims: Optional[Dims]=None, initval=None, observed=None, total_size=None, transform=UNSET, **kwargs) -> TensorVariable:\n \"\"\"Adds a tensor variable corresponding to a PyMC distribution to the current model. Note that all remaining kwargs must be compatible with ``.dist()`` Parameters ---------- cls : type A PyMC distribution. name : str Name for the new model variable. rng : optional Random number generator to use with the RandomVariable. dims : tuple, optional A tuple of dimension names known to the model. When shape is not provided, the shape of dims is used to define the shape of the variable. initval : optional Numeric or symbolic untransformed initial value of matching shape, or one of the following initial value strategies: \"moment\", \"prior\". Depending on the sampler's settings, a random jitter may be added\"\"\"\n <|body_0|>\n\n def dist(cls, dist_params, *, shape: Optional[Shape]=None, **kwargs) -> TensorVariable:\n \"\"\"Creates a tensor variable corresponding to the `cls` distribution. Parameters ---------- dist_params : array-like The inputs to the `RandomVariable` `Op`. shape : int, tuple, Variable, optional A tuple of sizes for each dimension of the new RV. **kwargs Keyword arguments that will be forwarded to the PyTensor RV Op. Most prominently: ``size`` or ``dtype``. Returns ------- rv : TensorVariable The created random variable tensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n from pymc.model import Model\n model = Model.get_context()\n except TypeError:\n raise TypeError(\"No model on context stack, which is needed to instantiate distributions. Add variable inside a 'with model:' block, or use the '.dist' syntax for a standalone distribution.\")\n if 'testval' in kwargs:\n initval = kwargs.pop('testval')\n warnings.warn('The `testval` argument is deprecated; use `initval`.', FutureWarning, stacklevel=2)\n if not isinstance(name, string_types):\n raise TypeError(f'Name needs to be a string but got: {name}')\n dims = convert_dims(dims)\n if observed is not None:\n observed = convert_observed_data(observed)\n if kwargs.get('size') is None and kwargs.get('shape') is None:\n if dims is not None:\n kwargs['shape'] = shape_from_dims(dims, model)\n elif observed is not None:\n kwargs['shape'] = tuple(observed.shape)\n rv_out = cls.dist(*args, **kwargs)\n rv_out = model.register_rv(rv_out, name, observed, total_size, dims=dims, transform=transform, initval=initval)\n rv_out.str_repr = types.MethodType(str_for_dist, rv_out)\n rv_out._repr_latex_ = types.MethodType(functools.partial(str_for_dist, formatting='latex'), rv_out)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n return rv_out\n<|end_body_0|>\n\n<|body_start_1|>\n if 'testval' in kwargs:\n kwargs.pop('testval')\n warnings.warn(\"The `.dist(testval=...)` argument is deprecated and has no effect. Initial values for sampling/optimization can be specified with `initval` in a modelcontext. For using PyTensor's test value features, you must assign the `.tag.test_value` yourself.\", FutureWarning, stacklevel=2)\n if 'initval' in kwargs:\n raise TypeError('Unexpected keyword argument `initval`. This argument is not available for the `.dist()` API.')\n if 'dims' in kwargs:\n raise NotImplementedError('The use of a `.dist(dims=...)` API is not supported.')\n size = kwargs.pop('size', None)\n if shape is not None and size is not None:\n raise ValueError(f'Passing both `shape` ({shape}) and `size` ({size}) is not supported!')\n shape = convert_shape(shape)\n size = convert_size(size)\n ndim_supp = getattr(cls.rv_op, 'ndim_supp', None)\n if ndim_supp is None:\n ndim_supp = cls.rv_op(*dist_params, **kwargs).owner.op.ndim_supp\n create_size = find_size(shape=shape, size=size, ndim_supp=ndim_supp)\n rv_out = cls.rv_op(*dist_params, size=create_size, **kwargs)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n _add_future_warning_tag(rv_out)\n return rv_out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000313", "length_bytes": 46281, "license_type": "permissive", "methods": [{"docstring": "Adds a tensor variable corresponding to a PyMC distribution to the current model. Note that all remaining kwargs must be compatible with ``.dist()`` Parameters ---------- cls : type A PyMC distribution. name : str Name for the new model variable. rng : optional Random number generator to use with the RandomVariable. dims : tuple, optional A tuple of dimension names known to the model. When shape is not provided, the shape of dims is used to define the shape of the variable. initval : optional Numeric or symbolic untransformed initial value of matching shape, or one of the following initial value strategies: \"moment\", \"prior\". Depending on the sampler's settings, a random jitter may be added", "name": "__new__", "signature": "def __new__(cls, name: str, *args, rng=None, dims: Optional[Dims]=None, initval=None, observed=None, total_size=None, transform=UNSET, **kwargs) -> TensorVariable"}, {"docstring": "Creates a tensor variable corresponding to the `cls` distribution. Parameters ---------- dist_params : array-like The inputs to the `RandomVariable` `Op`. shape : int, tuple, Variable, optional A tuple of sizes for each dimension of the new RV. **kwargs Keyword arguments that will be forwarded to the PyTensor RV Op. Most prominently: ``size`` or ``dtype``. Returns ------- rv : TensorVariable The created random variable tensor.", "name": "dist", "signature": "def dist(cls, dist_params, *, shape: Optional[Shape]=None, **kwargs) -> TensorVariable"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007716", "prompt": "Implement the Python class `Distribution` described below.\n\nClass description:\nStatistical distribution\n\nMethod signatures and docstrings:\n- def __new__(cls, name: str, *args, rng=None, dims: Optional[Dims]=None, initval=None, observed=None, total_size=None, transform=UNSET, **kwargs) -> TensorVariable: Adds a tensor variable corresponding to a PyMC distribution to the current model. Note that all remaining kwargs must be compatible with ``.dist()`` Parameters ---------- cls : type A PyMC distribution. name : str Name for the new model variable. rng : optional Random number generator to use with the RandomVariable. dims : tuple, optional A tuple of dimension names known to the model. When shape is not provided, the shape of dims is used to define the shape of the variable. initval : optional Numeric or symbolic untransformed initial value of matching shape, or one of the following initial value strategies: \"moment\", \"prior\". Depending on the sampler's settings, a random jitter may be added\n- def dist(cls, dist_params, *, shape: Optional[Shape]=None, **kwargs) -> TensorVariable: Creates a tensor variable corresponding to the `cls` distribution. Parameters ---------- dist_params : array-like The inputs to the `RandomVariable` `Op`. shape : int, tuple, Variable, optional A tuple of sizes for each dimension of the new RV. **kwargs Keyword arguments that will be forwarded to the PyTensor RV Op. Most prominently: ``size`` or ``dtype``. Returns ------- rv : TensorVariable The created random variable tensor.", "prompted_full_text": "Implement the Python class `Distribution` described below.\n\nClass description:\nStatistical distribution\n\nMethod signatures and docstrings:\n- def __new__(cls, name: str, *args, rng=None, dims: Optional[Dims]=None, initval=None, observed=None, total_size=None, transform=UNSET, **kwargs) -> TensorVariable: Adds a tensor variable corresponding to a PyMC distribution to the current model. Note that all remaining kwargs must be compatible with ``.dist()`` Parameters ---------- cls : type A PyMC distribution. name : str Name for the new model variable. rng : optional Random number generator to use with the RandomVariable. dims : tuple, optional A tuple of dimension names known to the model. When shape is not provided, the shape of dims is used to define the shape of the variable. initval : optional Numeric or symbolic untransformed initial value of matching shape, or one of the following initial value strategies: \"moment\", \"prior\". Depending on the sampler's settings, a random jitter may be added\n- def dist(cls, dist_params, *, shape: Optional[Shape]=None, **kwargs) -> TensorVariable: Creates a tensor variable corresponding to the `cls` distribution. Parameters ---------- dist_params : array-like The inputs to the `RandomVariable` `Op`. shape : int, tuple, Variable, optional A tuple of sizes for each dimension of the new RV. **kwargs Keyword arguments that will be forwarded to the PyTensor RV Op. Most prominently: ``size`` or ``dtype``. Returns ------- rv : TensorVariable The created random variable tensor.\n\n<|skeleton|>\nclass Distribution:\n \"\"\"Statistical distribution\"\"\"\n\n def __new__(cls, name: str, *args, rng=None, dims: Optional[Dims]=None, initval=None, observed=None, total_size=None, transform=UNSET, **kwargs) -> TensorVariable:\n \"\"\"Adds a tensor variable corresponding to a PyMC distribution to the current model. Note that all remaining kwargs must be compatible with ``.dist()`` Parameters ---------- cls : type A PyMC distribution. name : str Name for the new model variable. rng : optional Random number generator to use with the RandomVariable. dims : tuple, optional A tuple of dimension names known to the model. When shape is not provided, the shape of dims is used to define the shape of the variable. initval : optional Numeric or symbolic untransformed initial value of matching shape, or one of the following initial value strategies: \"moment\", \"prior\". Depending on the sampler's settings, a random jitter may be added\"\"\"\n <|body_0|>\n\n def dist(cls, dist_params, *, shape: Optional[Shape]=None, **kwargs) -> TensorVariable:\n \"\"\"Creates a tensor variable corresponding to the `cls` distribution. Parameters ---------- dist_params : array-like The inputs to the `RandomVariable` `Op`. shape : int, tuple, Variable, optional A tuple of sizes for each dimension of the new RV. **kwargs Keyword arguments that will be forwarded to the PyTensor RV Op. Most prominently: ``size`` or ``dtype``. Returns ------- rv : TensorVariable The created random variable tensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n from pymc.model import Model\n model = Model.get_context()\n except TypeError:\n raise TypeError(\"No model on context stack, which is needed to instantiate distributions. Add variable inside a 'with model:' block, or use the '.dist' syntax for a standalone distribution.\")\n if 'testval' in kwargs:\n initval = kwargs.pop('testval')\n warnings.warn('The `testval` argument is deprecated; use `initval`.', FutureWarning, stacklevel=2)\n if not isinstance(name, string_types):\n raise TypeError(f'Name needs to be a string but got: {name}')\n dims = convert_dims(dims)\n if observed is not None:\n observed = convert_observed_data(observed)\n if kwargs.get('size') is None and kwargs.get('shape') is None:\n if dims is not None:\n kwargs['shape'] = shape_from_dims(dims, model)\n elif observed is not None:\n kwargs['shape'] = tuple(observed.shape)\n rv_out = cls.dist(*args, **kwargs)\n rv_out = model.register_rv(rv_out, name, observed, total_size, dims=dims, transform=transform, initval=initval)\n rv_out.str_repr = types.MethodType(str_for_dist, rv_out)\n rv_out._repr_latex_ = types.MethodType(functools.partial(str_for_dist, formatting='latex'), rv_out)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n return rv_out\n<|end_body_0|>\n\n<|body_start_1|>\n if 'testval' in kwargs:\n kwargs.pop('testval')\n warnings.warn(\"The `.dist(testval=...)` argument is deprecated and has no effect. Initial values for sampling/optimization can be specified with `initval` in a modelcontext. For using PyTensor's test value features, you must assign the `.tag.test_value` yourself.\", FutureWarning, stacklevel=2)\n if 'initval' in kwargs:\n raise TypeError('Unexpected keyword argument `initval`. This argument is not available for the `.dist()` API.')\n if 'dims' in kwargs:\n raise NotImplementedError('The use of a `.dist(dims=...)` API is not supported.')\n size = kwargs.pop('size', None)\n if shape is not None and size is not None:\n raise ValueError(f'Passing both `shape` ({shape}) and `size` ({size}) is not supported!')\n shape = convert_shape(shape)\n size = convert_size(size)\n ndim_supp = getattr(cls.rv_op, 'ndim_supp', None)\n if ndim_supp is None:\n ndim_supp = cls.rv_op(*dist_params, **kwargs).owner.op.ndim_supp\n create_size = find_size(shape=shape, size=size, ndim_supp=ndim_supp)\n rv_out = cls.rv_op(*dist_params, size=create_size, **kwargs)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n _add_future_warning_tag(rv_out)\n return rv_out\n<|end_body_1|>\n", "revision_id": "ddd1d4bf05a72895c67265f541585ae02bd338a3", "skeleton": "<|skeleton|>\nclass Distribution:\n \"\"\"Statistical distribution\"\"\"\n\n def __new__(cls, name: str, *args, rng=None, dims: Optional[Dims]=None, initval=None, observed=None, total_size=None, transform=UNSET, **kwargs) -> TensorVariable:\n \"\"\"Adds a tensor variable corresponding to a PyMC distribution to the current model. Note that all remaining kwargs must be compatible with ``.dist()`` Parameters ---------- cls : type A PyMC distribution. name : str Name for the new model variable. rng : optional Random number generator to use with the RandomVariable. dims : tuple, optional A tuple of dimension names known to the model. When shape is not provided, the shape of dims is used to define the shape of the variable. initval : optional Numeric or symbolic untransformed initial value of matching shape, or one of the following initial value strategies: \"moment\", \"prior\". Depending on the sampler's settings, a random jitter may be added\"\"\"\n <|body_0|>\n\n def dist(cls, dist_params, *, shape: Optional[Shape]=None, **kwargs) -> TensorVariable:\n \"\"\"Creates a tensor variable corresponding to the `cls` distribution. Parameters ---------- dist_params : array-like The inputs to the `RandomVariable` `Op`. shape : int, tuple, Variable, optional A tuple of sizes for each dimension of the new RV. **kwargs Keyword arguments that will be forwarded to the PyTensor RV Op. Most prominently: ``size`` or ``dtype``. Returns ------- rv : TensorVariable The created random variable tensor.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Distribution:\n \"\"\"Statistical distribution\"\"\"\n\n def __new__(cls, name: str, *args, rng=None, dims: Optional[Dims]=None, initval=None, observed=None, total_size=None, transform=UNSET, **kwargs) -> TensorVariable:\n \"\"\"Adds a tensor variable corresponding to a PyMC distribution to the current model. Note that all remaining kwargs must be compatible with ``.dist()`` Parameters ---------- cls : type A PyMC distribution. name : str Name for the new model variable. rng : optional Random number generator to use with the RandomVariable. dims : tuple, optional A tuple of dimension names known to the model. When shape is not provided, the shape of dims is used to define the shape of the variable. initval : optional Numeric or symbolic untransformed initial value of matching shape, or one of the following initial value strategies: \"moment\", \"prior\". Depending on the sampler's settings, a random jitter may be added\"\"\"\n try:\n from pymc.model import Model\n model = Model.get_context()\n except TypeError:\n raise TypeError(\"No model on context stack, which is needed to instantiate distributions. Add variable inside a 'with model:' block, or use the '.dist' syntax for a standalone distribution.\")\n if 'testval' in kwargs:\n initval = kwargs.pop('testval')\n warnings.warn('The `testval` argument is deprecated; use `initval`.', FutureWarning, stacklevel=2)\n if not isinstance(name, string_types):\n raise TypeError(f'Name needs to be a string but got: {name}')\n dims = convert_dims(dims)\n if observed is not None:\n observed = convert_observed_data(observed)\n if kwargs.get('size') is None and kwargs.get('shape') is None:\n if dims is not None:\n kwargs['shape'] = shape_from_dims(dims, model)\n elif observed is not None:\n kwargs['shape'] = tuple(observed.shape)\n rv_out = cls.dist(*args, **kwargs)\n rv_out = model.register_rv(rv_out, name, observed, total_size, dims=dims, transform=transform, initval=initval)\n rv_out.str_repr = types.MethodType(str_for_dist, rv_out)\n rv_out._repr_latex_ = types.MethodType(functools.partial(str_for_dist, formatting='latex'), rv_out)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n return rv_out\n\n def dist(cls, dist_params, *, shape: Optional[Shape]=None, **kwargs) -> TensorVariable:\n \"\"\"Creates a tensor variable corresponding to the `cls` distribution. Parameters ---------- dist_params : array-like The inputs to the `RandomVariable` `Op`. shape : int, tuple, Variable, optional A tuple of sizes for each dimension of the new RV. **kwargs Keyword arguments that will be forwarded to the PyTensor RV Op. Most prominently: ``size`` or ``dtype``. Returns ------- rv : TensorVariable The created random variable tensor.\"\"\"\n if 'testval' in kwargs:\n kwargs.pop('testval')\n warnings.warn(\"The `.dist(testval=...)` argument is deprecated and has no effect. Initial values for sampling/optimization can be specified with `initval` in a modelcontext. For using PyTensor's test value features, you must assign the `.tag.test_value` yourself.\", FutureWarning, stacklevel=2)\n if 'initval' in kwargs:\n raise TypeError('Unexpected keyword argument `initval`. This argument is not available for the `.dist()` API.')\n if 'dims' in kwargs:\n raise NotImplementedError('The use of a `.dist(dims=...)` API is not supported.')\n size = kwargs.pop('size', None)\n if shape is not None and size is not None:\n raise ValueError(f'Passing both `shape` ({shape}) and `size` ({size}) is not supported!')\n shape = convert_shape(shape)\n size = convert_size(size)\n ndim_supp = getattr(cls.rv_op, 'ndim_supp', None)\n if ndim_supp is None:\n ndim_supp = cls.rv_op(*dist_params, **kwargs).owner.op.ndim_supp\n create_size = find_size(shape=shape, size=size, ndim_supp=ndim_supp)\n rv_out = cls.rv_op(*dist_params, size=create_size, **kwargs)\n rv_out.logp = _make_nice_attr_error('rv.logp(x)', 'pm.logp(rv, x)')\n rv_out.logcdf = _make_nice_attr_error('rv.logcdf(x)', 'pm.logcdf(rv, x)')\n rv_out.random = _make_nice_attr_error('rv.random()', 'pm.draw(rv)')\n _add_future_warning_tag(rv_out)\n return rv_out\n", "source": "the_stack_v2_python_sparse", "source_path": "pymc/distributions/distribution.py", "source_repo": "pymc-devs/pymc", "split": "test", "star_events_count": 1046} {"blob_id": "4782f5becbace547088b04f348a2229a268ced5f", "bodies": ["startTime = datetime.datetime.now()\nclient = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ajr10_williami', 'ajr10_williami')\nrepo.dropCollection('ajr10_williami.cleaned_energy_cambridge')\nrepo.createCollection('ajr10_williami.cleaned_energy_cambridge')\nrepo.dropCollection('ajr10_williami.cleaned_energy_boston')\nrepo.createCollection('ajr10_williami.cleaned_energy_boston')\nenergy_cambridge = repo['ajr10_williami.energy_cambridge'].find()\nenergy_boston = repo['ajr10_williami.energy_boston'].find()\nfor cambridge_energy in energy_cambridge:\n CO2 = cambridge_energy['co2_kg']\n mmbtu = cambridge_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_cambridge'].insert(new_energy)\nfor boston_energy in energy_boston:\n CO2 = boston_energy['emission_co2']\n mmbtu = boston_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_boston'].insert(new_energy)\nrepo.logout()\nendTime = datetime.datetime.now()\nreturn {'start': startTime, 'end': endTime}", "client = dml.pymongo.MongoClient()\nrepo = client.repo\nrepo.authenticate('ajr10_williami', 'ajr10_williami')\ndoc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\ndoc.add_namespace('dat', 'http://datamechanics.io/data/')\ndoc.add_namespace('ont', 'http://datamechanics.io/ontology#')\ndoc.add_namespace('log', 'http://datamechanics.io/log/')\ndoc.add_namespace('awc', 'ajr10_williami')\nthis_script = doc.agent('alg:ajr10_williami#clean_energy', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\nenergy_cambridge_resource = doc.entity('awc:energy_cambridge', {'prov:label': 'ajr10_williami.cleaned_energy_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\nenergy_boston_resource = doc.entity('awc:energy_boston', {'prov:label': 'ajr10_williami.cleaned_energy_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\nget_energys_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\nget_energys_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\ndoc.wasAssociatedWith(get_energys_cambridge, this_script)\ndoc.wasAssociatedWith(get_energys_boston, this_script)\ndoc.usage(get_energys_cambridge, energy_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Cambridge'})\ndoc.usage(get_energys_boston, energy_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Boston'})\nenergys_cambridge = doc.entity('dat:ajr10_williami#cleaned_energy_cambridge', {prov.model.PROV_LABEL: 'Energys Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAttributedTo(energys_cambridge, this_script)\ndoc.wasGeneratedBy(energys_cambridge, get_energys_cambridge, endTime)\ndoc.wasDerivedFrom(energys_cambridge, energy_cambridge_resource, get_energys_cambridge, get_energys_cambridge, get_energys_cambridge)\nenergys_boston = doc.entity('dat:ajr10_williami#cleaned_energy_boston', {prov.model.PROV_LABEL: 'Energys Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\ndoc.wasAttributedTo(energys_boston, this_script)\ndoc.wasGeneratedBy(energys_boston, get_energys_boston, endTime)\ndoc.wasDerivedFrom(energys_boston, energy_boston_resource, get_energys_boston, get_energys_boston, get_energys_boston)\nrepo.logout()\nreturn doc"], "bodies_text": "<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.createCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_energy_boston')\n repo.createCollection('ajr10_williami.cleaned_energy_boston')\n energy_cambridge = repo['ajr10_williami.energy_cambridge'].find()\n energy_boston = repo['ajr10_williami.energy_boston'].find()\n for cambridge_energy in energy_cambridge:\n CO2 = cambridge_energy['co2_kg']\n mmbtu = cambridge_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_cambridge'].insert(new_energy)\n for boston_energy in energy_boston:\n CO2 = boston_energy['emission_co2']\n mmbtu = boston_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_boston'].insert(new_energy)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_energy', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n energy_cambridge_resource = doc.entity('awc:energy_cambridge', {'prov:label': 'ajr10_williami.cleaned_energy_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n energy_boston_resource = doc.entity('awc:energy_boston', {'prov:label': 'ajr10_williami.cleaned_energy_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_energys_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_energys_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_energys_cambridge, this_script)\n doc.wasAssociatedWith(get_energys_boston, this_script)\n doc.usage(get_energys_cambridge, energy_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Cambridge'})\n doc.usage(get_energys_boston, energy_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Boston'})\n energys_cambridge = doc.entity('dat:ajr10_williami#cleaned_energy_cambridge', {prov.model.PROV_LABEL: 'Energys Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_cambridge, this_script)\n doc.wasGeneratedBy(energys_cambridge, get_energys_cambridge, endTime)\n doc.wasDerivedFrom(energys_cambridge, energy_cambridge_resource, get_energys_cambridge, get_energys_cambridge, get_energys_cambridge)\n energys_boston = doc.entity('dat:ajr10_williami#cleaned_energy_boston', {prov.model.PROV_LABEL: 'Energys Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_boston, this_script)\n doc.wasGeneratedBy(energys_boston, get_energys_boston, endTime)\n doc.wasDerivedFrom(energys_boston, energy_boston_resource, get_energys_boston, get_energys_boston, get_energys_boston)\n repo.logout()\n return doc\n<|end_body_1|>\n", "class_docstring": "", "class_name": "clean_energy", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass clean_energy:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.createCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_energy_boston')\n repo.createCollection('ajr10_williami.cleaned_energy_boston')\n energy_cambridge = repo['ajr10_williami.energy_cambridge'].find()\n energy_boston = repo['ajr10_williami.energy_boston'].find()\n for cambridge_energy in energy_cambridge:\n CO2 = cambridge_energy['co2_kg']\n mmbtu = cambridge_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_cambridge'].insert(new_energy)\n for boston_energy in energy_boston:\n CO2 = boston_energy['emission_co2']\n mmbtu = boston_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_boston'].insert(new_energy)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_energy', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n energy_cambridge_resource = doc.entity('awc:energy_cambridge', {'prov:label': 'ajr10_williami.cleaned_energy_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n energy_boston_resource = doc.entity('awc:energy_boston', {'prov:label': 'ajr10_williami.cleaned_energy_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_energys_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_energys_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_energys_cambridge, this_script)\n doc.wasAssociatedWith(get_energys_boston, this_script)\n doc.usage(get_energys_cambridge, energy_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Cambridge'})\n doc.usage(get_energys_boston, energy_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Boston'})\n energys_cambridge = doc.entity('dat:ajr10_williami#cleaned_energy_cambridge', {prov.model.PROV_LABEL: 'Energys Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_cambridge, this_script)\n doc.wasGeneratedBy(energys_cambridge, get_energys_cambridge, endTime)\n doc.wasDerivedFrom(energys_cambridge, energy_cambridge_resource, get_energys_cambridge, get_energys_cambridge, get_energys_cambridge)\n energys_boston = doc.entity('dat:ajr10_williami#cleaned_energy_boston', {prov.model.PROV_LABEL: 'Energys Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_boston, this_script)\n doc.wasGeneratedBy(energys_boston, get_energys_boston, endTime)\n doc.wasDerivedFrom(energys_boston, energy_boston_resource, get_energys_boston, get_energys_boston, get_energys_boston)\n repo.logout()\n return doc\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000314", "length_bytes": 5514, "license_type": "no_license", "methods": [{"docstring": "Retrieve some data sets and store in mongodb collections.", "name": "execute", "signature": "def execute(trial=False)"}, {"docstring": "Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "name": "provenance", "signature": "def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009305", "prompt": "Implement the Python class `clean_energy` described below.\n\nClass description:\nImplement the clean_energy class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets and store in mongodb collections.\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.", "prompted_full_text": "Implement the Python class `clean_energy` described below.\n\nClass description:\nImplement the clean_energy class.\n\nMethod signatures and docstrings:\n- def execute(trial=False): Retrieve some data sets and store in mongodb collections.\n- def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None): Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\n\n<|skeleton|>\nclass clean_energy:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.createCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_energy_boston')\n repo.createCollection('ajr10_williami.cleaned_energy_boston')\n energy_cambridge = repo['ajr10_williami.energy_cambridge'].find()\n energy_boston = repo['ajr10_williami.energy_boston'].find()\n for cambridge_energy in energy_cambridge:\n CO2 = cambridge_energy['co2_kg']\n mmbtu = cambridge_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_cambridge'].insert(new_energy)\n for boston_energy in energy_boston:\n CO2 = boston_energy['emission_co2']\n mmbtu = boston_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_boston'].insert(new_energy)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n<|end_body_0|>\n\n<|body_start_1|>\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_energy', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n energy_cambridge_resource = doc.entity('awc:energy_cambridge', {'prov:label': 'ajr10_williami.cleaned_energy_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n energy_boston_resource = doc.entity('awc:energy_boston', {'prov:label': 'ajr10_williami.cleaned_energy_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_energys_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_energys_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_energys_cambridge, this_script)\n doc.wasAssociatedWith(get_energys_boston, this_script)\n doc.usage(get_energys_cambridge, energy_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Cambridge'})\n doc.usage(get_energys_boston, energy_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Boston'})\n energys_cambridge = doc.entity('dat:ajr10_williami#cleaned_energy_cambridge', {prov.model.PROV_LABEL: 'Energys Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_cambridge, this_script)\n doc.wasGeneratedBy(energys_cambridge, get_energys_cambridge, endTime)\n doc.wasDerivedFrom(energys_cambridge, energy_cambridge_resource, get_energys_cambridge, get_energys_cambridge, get_energys_cambridge)\n energys_boston = doc.entity('dat:ajr10_williami#cleaned_energy_boston', {prov.model.PROV_LABEL: 'Energys Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_boston, this_script)\n doc.wasGeneratedBy(energys_boston, get_energys_boston, endTime)\n doc.wasDerivedFrom(energys_boston, energy_boston_resource, get_energys_boston, get_energys_boston, get_energys_boston)\n repo.logout()\n return doc\n<|end_body_1|>\n", "revision_id": "0df485d0469c5451ebdcd684bed2a0960ba3ab84", "skeleton": "<|skeleton|>\nclass clean_energy:\n\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n <|body_0|>\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class clean_energy:\n def execute(trial=False):\n \"\"\"Retrieve some data sets and store in mongodb collections.\"\"\"\n startTime = datetime.datetime.now()\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n repo.dropCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.createCollection('ajr10_williami.cleaned_energy_cambridge')\n repo.dropCollection('ajr10_williami.cleaned_energy_boston')\n repo.createCollection('ajr10_williami.cleaned_energy_boston')\n energy_cambridge = repo['ajr10_williami.energy_cambridge'].find()\n energy_boston = repo['ajr10_williami.energy_boston'].find()\n for cambridge_energy in energy_cambridge:\n CO2 = cambridge_energy['co2_kg']\n mmbtu = cambridge_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_cambridge'].insert(new_energy)\n for boston_energy in energy_boston:\n CO2 = boston_energy['emission_co2']\n mmbtu = boston_energy['use_mmbtu']\n new_energy = {}\n new_energy['CO2'] = CO2\n new_energy['mmbtu'] = mmbtu\n repo['ajr10_williami.cleaned_energy_boston'].insert(new_energy)\n repo.logout()\n endTime = datetime.datetime.now()\n return {'start': startTime, 'end': endTime}\n\n def provenance(doc=prov.model.ProvDocument(), startTime=None, endTime=None):\n \"\"\"Create the provenance document describing everything happening in this script. Each run of the script will generate a new document describing that invocation event.\"\"\"\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('ajr10_williami', 'ajr10_williami')\n doc.add_namespace('alg', 'http://datamechanics.io/algorithm/')\n doc.add_namespace('dat', 'http://datamechanics.io/data/')\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#')\n doc.add_namespace('log', 'http://datamechanics.io/log/')\n doc.add_namespace('awc', 'ajr10_williami')\n this_script = doc.agent('alg:ajr10_williami#clean_energy', {prov.model.PROV_TYPE: prov.model.PROV['SoftwareAgent'], 'ont:Extension': 'py'})\n energy_cambridge_resource = doc.entity('awc:energy_cambridge', {'prov:label': 'ajr10_williami.cleaned_energy_cambridge, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n energy_boston_resource = doc.entity('awc:energy_boston', {'prov:label': 'ajr10_williami.cleaned_energy_boston, Service Requests', prov.model.PROV_TYPE: 'ont:DataResource', 'ont:Extension': 'json'})\n get_energys_cambridge = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n get_energys_boston = doc.activity('log:uuid' + str(uuid.uuid4()), startTime, endTime)\n doc.wasAssociatedWith(get_energys_cambridge, this_script)\n doc.wasAssociatedWith(get_energys_boston, this_script)\n doc.usage(get_energys_cambridge, energy_cambridge_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Cambridge'})\n doc.usage(get_energys_boston, energy_boston_resource, startTime, None, {prov.model.PROV_TYPE: 'ont:Retrieval', 'ont:Query': '?type=Get+Energys+Boston'})\n energys_cambridge = doc.entity('dat:ajr10_williami#cleaned_energy_cambridge', {prov.model.PROV_LABEL: 'Energys Cambridge', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_cambridge, this_script)\n doc.wasGeneratedBy(energys_cambridge, get_energys_cambridge, endTime)\n doc.wasDerivedFrom(energys_cambridge, energy_cambridge_resource, get_energys_cambridge, get_energys_cambridge, get_energys_cambridge)\n energys_boston = doc.entity('dat:ajr10_williami#cleaned_energy_boston', {prov.model.PROV_LABEL: 'Energys Boston', prov.model.PROV_TYPE: 'ont:DataSet'})\n doc.wasAttributedTo(energys_boston, this_script)\n doc.wasGeneratedBy(energys_boston, get_energys_boston, endTime)\n doc.wasDerivedFrom(energys_boston, energy_boston_resource, get_energys_boston, get_energys_boston, get_energys_boston)\n repo.logout()\n return doc\n", "source": "the_stack_v2_python_sparse", "source_path": "ajr10_williami/clean_energy.py", "source_repo": "lingyigu/course-2017-spr-proj", "split": "test", "star_events_count": 0} {"blob_id": "df2a23695b28525efe6dbfca2fb93d66802d16d9", "bodies": ["self.lti_msg = msg\nself.lti_log = log\nreturn redirect(self.build_return_url())", "self.lti_errormsg = errormsg\nself.lti_errorlog = errorlog\nreturn redirect(self.build_return_url())"], "bodies_text": "<|body_start_0|>\n self.lti_msg = msg\n self.lti_log = log\n return redirect(self.build_return_url())\n<|end_body_0|>\n\n<|body_start_1|>\n self.lti_errormsg = errormsg\n self.lti_errorlog = errorlog\n return redirect(self.build_return_url())\n<|end_body_1|>\n", "class_docstring": "OAuth ToolProvider that works with Django requests.", "class_name": "DjangoToolProvider", "detected_licenses": ["MIT", "LGPL-2.0-or-later", "BSD-3-Clause", "Apache-2.0", "LGPL-2.1-only", "Python-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DjangoToolProvider:\n \"\"\"OAuth ToolProvider that works with Django requests.\"\"\"\n\n def success_redirect(self, msg='', log=''):\n \"\"\"Shortcut redirecting view to LTI Consumer with messages.\"\"\"\n <|body_0|>\n\n def error_redirect(self, errormsg='', errorlog=''):\n \"\"\"Shortcut for redirecting view to LTI Consumer with errors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lti_msg = msg\n self.lti_log = log\n return redirect(self.build_return_url())\n<|end_body_0|>\n\n<|body_start_1|>\n self.lti_errormsg = errormsg\n self.lti_errorlog = errorlog\n return redirect(self.build_return_url())\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000315", "length_bytes": 5385, "license_type": "permissive", "methods": [{"docstring": "Shortcut redirecting view to LTI Consumer with messages.", "name": "success_redirect", "signature": "def success_redirect(self, msg='', log='')"}, {"docstring": "Shortcut for redirecting view to LTI Consumer with errors.", "name": "error_redirect", "signature": "def error_redirect(self, errormsg='', errorlog='')"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000942", "prompt": "Implement the Python class `DjangoToolProvider` described below.\n\nClass description:\nOAuth ToolProvider that works with Django requests.\n\nMethod signatures and docstrings:\n- def success_redirect(self, msg='', log=''): Shortcut redirecting view to LTI Consumer with messages.\n- def error_redirect(self, errormsg='', errorlog=''): Shortcut for redirecting view to LTI Consumer with errors.", "prompted_full_text": "Implement the Python class `DjangoToolProvider` described below.\n\nClass description:\nOAuth ToolProvider that works with Django requests.\n\nMethod signatures and docstrings:\n- def success_redirect(self, msg='', log=''): Shortcut redirecting view to LTI Consumer with messages.\n- def error_redirect(self, errormsg='', errorlog=''): Shortcut for redirecting view to LTI Consumer with errors.\n\n<|skeleton|>\nclass DjangoToolProvider:\n \"\"\"OAuth ToolProvider that works with Django requests.\"\"\"\n\n def success_redirect(self, msg='', log=''):\n \"\"\"Shortcut redirecting view to LTI Consumer with messages.\"\"\"\n <|body_0|>\n\n def error_redirect(self, errormsg='', errorlog=''):\n \"\"\"Shortcut for redirecting view to LTI Consumer with errors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.lti_msg = msg\n self.lti_log = log\n return redirect(self.build_return_url())\n<|end_body_0|>\n\n<|body_start_1|>\n self.lti_errormsg = errormsg\n self.lti_errorlog = errorlog\n return redirect(self.build_return_url())\n<|end_body_1|>\n", "revision_id": "c432745dfff932cbe7397100422d49df78f0a882", "skeleton": "<|skeleton|>\nclass DjangoToolProvider:\n \"\"\"OAuth ToolProvider that works with Django requests.\"\"\"\n\n def success_redirect(self, msg='', log=''):\n \"\"\"Shortcut redirecting view to LTI Consumer with messages.\"\"\"\n <|body_0|>\n\n def error_redirect(self, errormsg='', errorlog=''):\n \"\"\"Shortcut for redirecting view to LTI Consumer with errors.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DjangoToolProvider:\n \"\"\"OAuth ToolProvider that works with Django requests.\"\"\"\n\n def success_redirect(self, msg='', log=''):\n \"\"\"Shortcut redirecting view to LTI Consumer with messages.\"\"\"\n self.lti_msg = msg\n self.lti_log = log\n return redirect(self.build_return_url())\n\n def error_redirect(self, errormsg='', errorlog=''):\n \"\"\"Shortcut for redirecting view to LTI Consumer with errors.\"\"\"\n self.lti_errormsg = errormsg\n self.lti_errorlog = errorlog\n return redirect(self.build_return_url())\n", "source": "the_stack_v2_python_sparse", "source_path": "ontask/lti/tool_provider.py", "source_repo": "abelardopardo/ontask_b", "split": "test", "star_events_count": 43} {"blob_id": "f1b537b3865b0aa315b8685656e6862a20b14e85", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n\n<|body_start_5|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_5|>\n", "class_docstring": "The JobController provides methods to manage jobs.", "class_name": "JobControllerServicer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass JobControllerServicer:\n \"\"\"The JobController provides methods to manage jobs.\"\"\"\n\n def SubmitJob(self, request, context):\n \"\"\"Submits a job to a cluster.\"\"\"\n <|body_0|>\n\n def GetJob(self, request, context):\n \"\"\"Gets the resource representation for a job in a project.\"\"\"\n <|body_1|>\n\n def ListJobs(self, request, context):\n \"\"\"Lists regions/{region}/jobs in a project.\"\"\"\n <|body_2|>\n\n def UpdateJob(self, request, context):\n \"\"\"Updates a job in a project.\"\"\"\n <|body_3|>\n\n def CancelJob(self, request, context):\n \"\"\"Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).\"\"\"\n <|body_4|>\n\n def DeleteJob(self, request, context):\n \"\"\"Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n\n<|body_start_5|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000316", "length_bytes": 6938, "license_type": "permissive", "methods": [{"docstring": "Submits a job to a cluster.", "name": "SubmitJob", "signature": "def SubmitJob(self, request, context)"}, {"docstring": "Gets the resource representation for a job in a project.", "name": "GetJob", "signature": "def GetJob(self, request, context)"}, {"docstring": "Lists regions/{region}/jobs in a project.", "name": "ListJobs", "signature": "def ListJobs(self, request, context)"}, {"docstring": "Updates a job in a project.", "name": "UpdateJob", "signature": "def UpdateJob(self, request, context)"}, {"docstring": "Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).", "name": "CancelJob", "signature": "def CancelJob(self, request, context)"}, {"docstring": "Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", "name": "DeleteJob", "signature": "def DeleteJob(self, request, context)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_028864", "prompt": "Implement the Python class `JobControllerServicer` described below.\n\nClass description:\nThe JobController provides methods to manage jobs.\n\nMethod signatures and docstrings:\n- def SubmitJob(self, request, context): Submits a job to a cluster.\n- def GetJob(self, request, context): Gets the resource representation for a job in a project.\n- def ListJobs(self, request, context): Lists regions/{region}/jobs in a project.\n- def UpdateJob(self, request, context): Updates a job in a project.\n- def CancelJob(self, request, context): Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).\n- def DeleteJob(self, request, context): Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.", "prompted_full_text": "Implement the Python class `JobControllerServicer` described below.\n\nClass description:\nThe JobController provides methods to manage jobs.\n\nMethod signatures and docstrings:\n- def SubmitJob(self, request, context): Submits a job to a cluster.\n- def GetJob(self, request, context): Gets the resource representation for a job in a project.\n- def ListJobs(self, request, context): Lists regions/{region}/jobs in a project.\n- def UpdateJob(self, request, context): Updates a job in a project.\n- def CancelJob(self, request, context): Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).\n- def DeleteJob(self, request, context): Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.\n\n<|skeleton|>\nclass JobControllerServicer:\n \"\"\"The JobController provides methods to manage jobs.\"\"\"\n\n def SubmitJob(self, request, context):\n \"\"\"Submits a job to a cluster.\"\"\"\n <|body_0|>\n\n def GetJob(self, request, context):\n \"\"\"Gets the resource representation for a job in a project.\"\"\"\n <|body_1|>\n\n def ListJobs(self, request, context):\n \"\"\"Lists regions/{region}/jobs in a project.\"\"\"\n <|body_2|>\n\n def UpdateJob(self, request, context):\n \"\"\"Updates a job in a project.\"\"\"\n <|body_3|>\n\n def CancelJob(self, request, context):\n \"\"\"Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).\"\"\"\n <|body_4|>\n\n def DeleteJob(self, request, context):\n \"\"\"Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n\n<|body_start_4|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_4|>\n\n<|body_start_5|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_5|>\n", "revision_id": "d897d56bce03d1fda98b79afb08264e51d46c421", "skeleton": "<|skeleton|>\nclass JobControllerServicer:\n \"\"\"The JobController provides methods to manage jobs.\"\"\"\n\n def SubmitJob(self, request, context):\n \"\"\"Submits a job to a cluster.\"\"\"\n <|body_0|>\n\n def GetJob(self, request, context):\n \"\"\"Gets the resource representation for a job in a project.\"\"\"\n <|body_1|>\n\n def ListJobs(self, request, context):\n \"\"\"Lists regions/{region}/jobs in a project.\"\"\"\n <|body_2|>\n\n def UpdateJob(self, request, context):\n \"\"\"Updates a job in a project.\"\"\"\n <|body_3|>\n\n def CancelJob(self, request, context):\n \"\"\"Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).\"\"\"\n <|body_4|>\n\n def DeleteJob(self, request, context):\n \"\"\"Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class JobControllerServicer:\n \"\"\"The JobController provides methods to manage jobs.\"\"\"\n\n def SubmitJob(self, request, context):\n \"\"\"Submits a job to a cluster.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def GetJob(self, request, context):\n \"\"\"Gets the resource representation for a job in a project.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def ListJobs(self, request, context):\n \"\"\"Lists regions/{region}/jobs in a project.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def UpdateJob(self, request, context):\n \"\"\"Updates a job in a project.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def CancelJob(self, request, context):\n \"\"\"Starts a job cancellation request. To access the job resource after cancellation, call [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def DeleteJob(self, request, context):\n \"\"\"Deletes the job from the project. If the job is active, the delete fails, and the response returns `FAILED_PRECONDITION`.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "dataproc/google/cloud/dataproc_v1/proto/jobs_pb2_grpc.py", "source_repo": "tswast/google-cloud-python", "split": "test", "star_events_count": 1} {"blob_id": "fc5098e903f2c888d6eadfb11a5210f594600dce", "bodies": ["tempDictionary = {}\ntempDictionary['total_price'] = 0\nfor product in products:\n if not product['name'] in self.products_data:\n new_product = super(Purchase, self).__init__(product['name'], product['type'], 0, product['price'])\n product_price = product['quantity'] * product['price']\n tax = product_price * self.products_data[product['name']]['purchase_tax'] / 100\n product['total_price'] = product_price + tax\n tempDictionary['total_price'] += product['total_price']\n if self.products_data[product['name']]['max_purchase'] < product['total_price']:\n if self.products_data[product['name']]['onhand'] == 0:\n del self.products_data[product['name']]\n print('Purchase price limit exceeds...')\n return False\ntempDictionary['date'] = datetime.datetime.utcnow()\ntempDictionary['state'] = 'Confirm'\ntempDictionary['products'] = products\ntempDictionary['vendor_name'] = vendor_name\norder = list(sorted(self.purchase_details.keys()))\nif order == []:\n key = 'PO/0001'\nelse:\n number = re.findall('\\\\d+', order[-1])[0]\n number = int(number) + 1\n key = 'PO/' + str(number).zfill(4)\nself.purchase_details[key] = tempDictionary\nreturn key", "order = self.purchase_details[purchase_order]\norder['state'] = 'Done'\nfor product in order['products']:\n pro_data = self.products_data[product['name']]\n pro_data['onhand'] += product['quantity']\n self.products_data[product['name']]['purchase_order'].append(purchase_order)\nreturn True"], "bodies_text": "<|body_start_0|>\n tempDictionary = {}\n tempDictionary['total_price'] = 0\n for product in products:\n if not product['name'] in self.products_data:\n new_product = super(Purchase, self).__init__(product['name'], product['type'], 0, product['price'])\n product_price = product['quantity'] * product['price']\n tax = product_price * self.products_data[product['name']]['purchase_tax'] / 100\n product['total_price'] = product_price + tax\n tempDictionary['total_price'] += product['total_price']\n if self.products_data[product['name']]['max_purchase'] < product['total_price']:\n if self.products_data[product['name']]['onhand'] == 0:\n del self.products_data[product['name']]\n print('Purchase price limit exceeds...')\n return False\n tempDictionary['date'] = datetime.datetime.utcnow()\n tempDictionary['state'] = 'Confirm'\n tempDictionary['products'] = products\n tempDictionary['vendor_name'] = vendor_name\n order = list(sorted(self.purchase_details.keys()))\n if order == []:\n key = 'PO/0001'\n else:\n number = re.findall('\\\\d+', order[-1])[0]\n number = int(number) + 1\n key = 'PO/' + str(number).zfill(4)\n self.purchase_details[key] = tempDictionary\n return key\n<|end_body_0|>\n\n<|body_start_1|>\n order = self.purchase_details[purchase_order]\n order['state'] = 'Done'\n for product in order['products']:\n pro_data = self.products_data[product['name']]\n pro_data['onhand'] += product['quantity']\n self.products_data[product['name']]['purchase_order'].append(purchase_order)\n return True\n<|end_body_1|>\n", "class_docstring": "This class used to store purchase of products", "class_name": "Purchase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Purchase:\n \"\"\"This class used to store purchase of products\"\"\"\n\n def create_purchase_order(self, products, vendor_name):\n \"\"\"func :- Used to create new purchase order. params :- products' names, quantity, type and price - dictionary params :- vendor name - string returns :- Error msg if more than purchase price else purchase number\"\"\"\n <|body_0|>\n\n def confirm_purchase_order(self, purchase_order):\n \"\"\"func :- Used to manage purchased products params :- purchase order - string returns :- True or False as Order confirms\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tempDictionary = {}\n tempDictionary['total_price'] = 0\n for product in products:\n if not product['name'] in self.products_data:\n new_product = super(Purchase, self).__init__(product['name'], product['type'], 0, product['price'])\n product_price = product['quantity'] * product['price']\n tax = product_price * self.products_data[product['name']]['purchase_tax'] / 100\n product['total_price'] = product_price + tax\n tempDictionary['total_price'] += product['total_price']\n if self.products_data[product['name']]['max_purchase'] < product['total_price']:\n if self.products_data[product['name']]['onhand'] == 0:\n del self.products_data[product['name']]\n print('Purchase price limit exceeds...')\n return False\n tempDictionary['date'] = datetime.datetime.utcnow()\n tempDictionary['state'] = 'Confirm'\n tempDictionary['products'] = products\n tempDictionary['vendor_name'] = vendor_name\n order = list(sorted(self.purchase_details.keys()))\n if order == []:\n key = 'PO/0001'\n else:\n number = re.findall('\\\\d+', order[-1])[0]\n number = int(number) + 1\n key = 'PO/' + str(number).zfill(4)\n self.purchase_details[key] = tempDictionary\n return key\n<|end_body_0|>\n\n<|body_start_1|>\n order = self.purchase_details[purchase_order]\n order['state'] = 'Done'\n for product in order['products']:\n pro_data = self.products_data[product['name']]\n pro_data['onhand'] += product['quantity']\n self.products_data[product['name']]['purchase_order'].append(purchase_order)\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000317", "length_bytes": 2497, "license_type": "no_license", "methods": [{"docstring": "func :- Used to create new purchase order. params :- products' names, quantity, type and price - dictionary params :- vendor name - string returns :- Error msg if more than purchase price else purchase number", "name": "create_purchase_order", "signature": "def create_purchase_order(self, products, vendor_name)"}, {"docstring": "func :- Used to manage purchased products params :- purchase order - string returns :- True or False as Order confirms", "name": "confirm_purchase_order", "signature": "def confirm_purchase_order(self, purchase_order)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_030161", "prompt": "Implement the Python class `Purchase` described below.\n\nClass description:\nThis class used to store purchase of products\n\nMethod signatures and docstrings:\n- def create_purchase_order(self, products, vendor_name): func :- Used to create new purchase order. params :- products' names, quantity, type and price - dictionary params :- vendor name - string returns :- Error msg if more than purchase price else purchase number\n- def confirm_purchase_order(self, purchase_order): func :- Used to manage purchased products params :- purchase order - string returns :- True or False as Order confirms", "prompted_full_text": "Implement the Python class `Purchase` described below.\n\nClass description:\nThis class used to store purchase of products\n\nMethod signatures and docstrings:\n- def create_purchase_order(self, products, vendor_name): func :- Used to create new purchase order. params :- products' names, quantity, type and price - dictionary params :- vendor name - string returns :- Error msg if more than purchase price else purchase number\n- def confirm_purchase_order(self, purchase_order): func :- Used to manage purchased products params :- purchase order - string returns :- True or False as Order confirms\n\n<|skeleton|>\nclass Purchase:\n \"\"\"This class used to store purchase of products\"\"\"\n\n def create_purchase_order(self, products, vendor_name):\n \"\"\"func :- Used to create new purchase order. params :- products' names, quantity, type and price - dictionary params :- vendor name - string returns :- Error msg if more than purchase price else purchase number\"\"\"\n <|body_0|>\n\n def confirm_purchase_order(self, purchase_order):\n \"\"\"func :- Used to manage purchased products params :- purchase order - string returns :- True or False as Order confirms\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tempDictionary = {}\n tempDictionary['total_price'] = 0\n for product in products:\n if not product['name'] in self.products_data:\n new_product = super(Purchase, self).__init__(product['name'], product['type'], 0, product['price'])\n product_price = product['quantity'] * product['price']\n tax = product_price * self.products_data[product['name']]['purchase_tax'] / 100\n product['total_price'] = product_price + tax\n tempDictionary['total_price'] += product['total_price']\n if self.products_data[product['name']]['max_purchase'] < product['total_price']:\n if self.products_data[product['name']]['onhand'] == 0:\n del self.products_data[product['name']]\n print('Purchase price limit exceeds...')\n return False\n tempDictionary['date'] = datetime.datetime.utcnow()\n tempDictionary['state'] = 'Confirm'\n tempDictionary['products'] = products\n tempDictionary['vendor_name'] = vendor_name\n order = list(sorted(self.purchase_details.keys()))\n if order == []:\n key = 'PO/0001'\n else:\n number = re.findall('\\\\d+', order[-1])[0]\n number = int(number) + 1\n key = 'PO/' + str(number).zfill(4)\n self.purchase_details[key] = tempDictionary\n return key\n<|end_body_0|>\n\n<|body_start_1|>\n order = self.purchase_details[purchase_order]\n order['state'] = 'Done'\n for product in order['products']:\n pro_data = self.products_data[product['name']]\n pro_data['onhand'] += product['quantity']\n self.products_data[product['name']]['purchase_order'].append(purchase_order)\n return True\n<|end_body_1|>\n", "revision_id": "08668c834bdb4aee3abafdedc9126bba7aa041b8", "skeleton": "<|skeleton|>\nclass Purchase:\n \"\"\"This class used to store purchase of products\"\"\"\n\n def create_purchase_order(self, products, vendor_name):\n \"\"\"func :- Used to create new purchase order. params :- products' names, quantity, type and price - dictionary params :- vendor name - string returns :- Error msg if more than purchase price else purchase number\"\"\"\n <|body_0|>\n\n def confirm_purchase_order(self, purchase_order):\n \"\"\"func :- Used to manage purchased products params :- purchase order - string returns :- True or False as Order confirms\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Purchase:\n \"\"\"This class used to store purchase of products\"\"\"\n\n def create_purchase_order(self, products, vendor_name):\n \"\"\"func :- Used to create new purchase order. params :- products' names, quantity, type and price - dictionary params :- vendor name - string returns :- Error msg if more than purchase price else purchase number\"\"\"\n tempDictionary = {}\n tempDictionary['total_price'] = 0\n for product in products:\n if not product['name'] in self.products_data:\n new_product = super(Purchase, self).__init__(product['name'], product['type'], 0, product['price'])\n product_price = product['quantity'] * product['price']\n tax = product_price * self.products_data[product['name']]['purchase_tax'] / 100\n product['total_price'] = product_price + tax\n tempDictionary['total_price'] += product['total_price']\n if self.products_data[product['name']]['max_purchase'] < product['total_price']:\n if self.products_data[product['name']]['onhand'] == 0:\n del self.products_data[product['name']]\n print('Purchase price limit exceeds...')\n return False\n tempDictionary['date'] = datetime.datetime.utcnow()\n tempDictionary['state'] = 'Confirm'\n tempDictionary['products'] = products\n tempDictionary['vendor_name'] = vendor_name\n order = list(sorted(self.purchase_details.keys()))\n if order == []:\n key = 'PO/0001'\n else:\n number = re.findall('\\\\d+', order[-1])[0]\n number = int(number) + 1\n key = 'PO/' + str(number).zfill(4)\n self.purchase_details[key] = tempDictionary\n return key\n\n def confirm_purchase_order(self, purchase_order):\n \"\"\"func :- Used to manage purchased products params :- purchase order - string returns :- True or False as Order confirms\"\"\"\n order = self.purchase_details[purchase_order]\n order['state'] = 'Done'\n for product in order['products']:\n pro_data = self.products_data[product['name']]\n pro_data['onhand'] += product['quantity']\n self.products_data[product['name']]['purchase_order'].append(purchase_order)\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "Test1/purchase.py", "source_repo": "maulikb-emipro/Python-Training", "split": "test", "star_events_count": 0} {"blob_id": "1930bbdf06417f60ca99bd621d957b90d74bc742", "bodies": ["super().__init__()\nself.conv_layers = nn.Sequential()\nself.fc_layers = nn.Sequential()\nself.loss_criterion = None\nself.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\nself.conv_layers = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU(), nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU())\nself.fc_layers = nn.Sequential(nn.Flatten(), nn.Linear(500, 100), nn.Linear(100, 15))", "conv_features = None\nflattened_conv_features = None\nmodel_output = None\nconv_out = self.conv_layers(x)\nmodel_output = self.fc_layers(conv_out)\nreturn model_output"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.conv_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = None\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n self.conv_layers = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU(), nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU())\n self.fc_layers = nn.Sequential(nn.Flatten(), nn.Linear(500, 100), nn.Linear(100, 15))\n<|end_body_0|>\n\n<|body_start_1|>\n conv_features = None\n flattened_conv_features = None\n model_output = None\n conv_out = self.conv_layers(x)\n model_output = self.fc_layers(conv_out)\n return model_output\n<|end_body_1|>\n", "class_docstring": "Simple Network with atleast 2 conv2d layers and two linear layers.", "class_name": "SimpleNet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SimpleNet:\n \"\"\"Simple Network with atleast 2 conv2d layers and two linear layers.\"\"\"\n\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read Pytorch documention to understand what it means Hints: 1. Refer to https://pytorch.org/docs/stable/nn.html for layers 2. Remember to use non-linearities in your network. Network without non-linearities is not deep. 3. You will get 4D tensor for an image input from self.conv_layers. You need to process it and make it a compatible tensor input for self.fc_layers.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Note: do not perform soft-max or convert to probabilities in this function Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.conv_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = None\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n self.conv_layers = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU(), nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU())\n self.fc_layers = nn.Sequential(nn.Flatten(), nn.Linear(500, 100), nn.Linear(100, 15))\n<|end_body_0|>\n\n<|body_start_1|>\n conv_features = None\n flattened_conv_features = None\n model_output = None\n conv_out = self.conv_layers(x)\n model_output = self.fc_layers(conv_out)\n return model_output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000318", "length_bytes": 3115, "license_type": "no_license", "methods": [{"docstring": "Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read Pytorch documention to understand what it means Hints: 1. Refer to https://pytorch.org/docs/stable/nn.html for layers 2. Remember to use non-linearities in your network. Network without non-linearities is not deep. 3. You will get 4D tensor for an image input from self.conv_layers. You need to process it and make it a compatible tensor input for self.fc_layers.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Perform the forward pass with the net Note: do not perform soft-max or convert to probabilities in this function Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]", "name": "forward", "signature": "def forward(self, x: torch.tensor) -> torch.tensor"}], "n_methods": 2, "prompt": "Implement the Python class `SimpleNet` described below.\n\nClass description:\nSimple Network with atleast 2 conv2d layers and two linear layers.\n\nMethod signatures and docstrings:\n- def __init__(self): Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read Pytorch documention to understand what it means Hints: 1. Refer to https://pytorch.org/docs/stable/nn.html for layers 2. Remember to use non-linearities in your network. Network without non-linearities is not deep. 3. You will get 4D tensor for an image input from self.conv_layers. You need to process it and make it a compatible tensor input for self.fc_layers.\n- def forward(self, x: torch.tensor) -> torch.tensor: Perform the forward pass with the net Note: do not perform soft-max or convert to probabilities in this function Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]", "prompted_full_text": "Implement the Python class `SimpleNet` described below.\n\nClass description:\nSimple Network with atleast 2 conv2d layers and two linear layers.\n\nMethod signatures and docstrings:\n- def __init__(self): Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read Pytorch documention to understand what it means Hints: 1. Refer to https://pytorch.org/docs/stable/nn.html for layers 2. Remember to use non-linearities in your network. Network without non-linearities is not deep. 3. You will get 4D tensor for an image input from self.conv_layers. You need to process it and make it a compatible tensor input for self.fc_layers.\n- def forward(self, x: torch.tensor) -> torch.tensor: Perform the forward pass with the net Note: do not perform soft-max or convert to probabilities in this function Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\n\n<|skeleton|>\nclass SimpleNet:\n \"\"\"Simple Network with atleast 2 conv2d layers and two linear layers.\"\"\"\n\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read Pytorch documention to understand what it means Hints: 1. Refer to https://pytorch.org/docs/stable/nn.html for layers 2. Remember to use non-linearities in your network. Network without non-linearities is not deep. 3. You will get 4D tensor for an image input from self.conv_layers. You need to process it and make it a compatible tensor input for self.fc_layers.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Note: do not perform soft-max or convert to probabilities in this function Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.conv_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = None\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n self.conv_layers = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU(), nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU())\n self.fc_layers = nn.Sequential(nn.Flatten(), nn.Linear(500, 100), nn.Linear(100, 15))\n<|end_body_0|>\n\n<|body_start_1|>\n conv_features = None\n flattened_conv_features = None\n model_output = None\n conv_out = self.conv_layers(x)\n model_output = self.fc_layers(conv_out)\n return model_output\n<|end_body_1|>\n", "revision_id": "471fce612f3ca63ff4e0780060108d847aff7193", "skeleton": "<|skeleton|>\nclass SimpleNet:\n \"\"\"Simple Network with atleast 2 conv2d layers and two linear layers.\"\"\"\n\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read Pytorch documention to understand what it means Hints: 1. Refer to https://pytorch.org/docs/stable/nn.html for layers 2. Remember to use non-linearities in your network. Network without non-linearities is not deep. 3. You will get 4D tensor for an image input from self.conv_layers. You need to process it and make it a compatible tensor input for self.fc_layers.\"\"\"\n <|body_0|>\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Note: do not perform soft-max or convert to probabilities in this function Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SimpleNet:\n \"\"\"Simple Network with atleast 2 conv2d layers and two linear layers.\"\"\"\n\n def __init__(self):\n \"\"\"Init function to define the layers and loss function Note: Use 'sum' reduction in the loss_criterion. Read Pytorch documention to understand what it means Hints: 1. Refer to https://pytorch.org/docs/stable/nn.html for layers 2. Remember to use non-linearities in your network. Network without non-linearities is not deep. 3. You will get 4D tensor for an image input from self.conv_layers. You need to process it and make it a compatible tensor input for self.fc_layers.\"\"\"\n super().__init__()\n self.conv_layers = nn.Sequential()\n self.fc_layers = nn.Sequential()\n self.loss_criterion = None\n self.loss_criterion = nn.CrossEntropyLoss(reduction='sum')\n self.conv_layers = nn.Sequential(nn.Conv2d(in_channels=1, out_channels=10, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU(), nn.Conv2d(in_channels=10, out_channels=20, kernel_size=5, bias=False), nn.MaxPool2d(kernel_size=3, stride=3), nn.ReLU())\n self.fc_layers = nn.Sequential(nn.Flatten(), nn.Linear(500, 100), nn.Linear(100, 15))\n\n def forward(self, x: torch.tensor) -> torch.tensor:\n \"\"\"Perform the forward pass with the net Note: do not perform soft-max or convert to probabilities in this function Args: - x: the input image [Dim: (N,C,H,W)] Returns: - y: the output (raw scores) of the net [Dim: (N,15)]\"\"\"\n conv_features = None\n flattened_conv_features = None\n model_output = None\n conv_out = self.conv_layers(x)\n model_output = self.fc_layers(conv_out)\n return model_output\n", "source": "the_stack_v2_python_sparse", "source_path": "PS6_v1/proj6_code/simple_net.py", "source_repo": "ConnorPeng/Computer-Vision", "split": "test", "star_events_count": 0} {"blob_id": "d5cd8da27f4cf029279d14fbd9fd3e964211bbc1", "bodies": ["Shape.__init__(self, turtle, color)\nself._x = x\nself._y = y\nself._radius = radius", "circumference = 2 * 3.14159 * self._radius\nself._turtle.up()\nself._turtle.move(self._x + self._radius, self._y)\nself._turtle.setDirection(90)\nself._turtle.down()\nfor i in xrange(120):\n self._turtle.move(circumference / 120)\n self._turtle.turn(3)\nself._turtle.up()"], "bodies_text": "<|body_start_0|>\n Shape.__init__(self, turtle, color)\n self._x = x\n self._y = y\n self._radius = radius\n<|end_body_0|>\n\n<|body_start_1|>\n circumference = 2 * 3.14159 * self._radius\n self._turtle.up()\n self._turtle.move(self._x + self._radius, self._y)\n self._turtle.setDirection(90)\n self._turtle.down()\n for i in xrange(120):\n self._turtle.move(circumference / 120)\n self._turtle.turn(3)\n self._turtle.up()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Circle", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Circle:\n\n def __init__(self, turtle, color, x, y, radius):\n \"\"\"This constructor initializes variables and an inherited class that will be needed for each circle.\"\"\"\n <|body_0|>\n\n def draw(self):\n \"\"\"Draws the circle that is represented by the stored variables.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Shape.__init__(self, turtle, color)\n self._x = x\n self._y = y\n self._radius = radius\n<|end_body_0|>\n\n<|body_start_1|>\n circumference = 2 * 3.14159 * self._radius\n self._turtle.up()\n self._turtle.move(self._x + self._radius, self._y)\n self._turtle.setDirection(90)\n self._turtle.down()\n for i in xrange(120):\n self._turtle.move(circumference / 120)\n self._turtle.turn(3)\n self._turtle.up()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000319", "length_bytes": 7146, "license_type": "permissive", "methods": [{"docstring": "This constructor initializes variables and an inherited class that will be needed for each circle.", "name": "__init__", "signature": "def __init__(self, turtle, color, x, y, radius)"}, {"docstring": "Draws the circle that is represented by the stored variables.", "name": "draw", "signature": "def draw(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_002757", "prompt": "Implement the Python class `Circle` described below.\n\nClass description:\nImplement the Circle class.\n\nMethod signatures and docstrings:\n- def __init__(self, turtle, color, x, y, radius): This constructor initializes variables and an inherited class that will be needed for each circle.\n- def draw(self): Draws the circle that is represented by the stored variables.", "prompted_full_text": "Implement the Python class `Circle` described below.\n\nClass description:\nImplement the Circle class.\n\nMethod signatures and docstrings:\n- def __init__(self, turtle, color, x, y, radius): This constructor initializes variables and an inherited class that will be needed for each circle.\n- def draw(self): Draws the circle that is represented by the stored variables.\n\n<|skeleton|>\nclass Circle:\n\n def __init__(self, turtle, color, x, y, radius):\n \"\"\"This constructor initializes variables and an inherited class that will be needed for each circle.\"\"\"\n <|body_0|>\n\n def draw(self):\n \"\"\"Draws the circle that is represented by the stored variables.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n Shape.__init__(self, turtle, color)\n self._x = x\n self._y = y\n self._radius = radius\n<|end_body_0|>\n\n<|body_start_1|>\n circumference = 2 * 3.14159 * self._radius\n self._turtle.up()\n self._turtle.move(self._x + self._radius, self._y)\n self._turtle.setDirection(90)\n self._turtle.down()\n for i in xrange(120):\n self._turtle.move(circumference / 120)\n self._turtle.turn(3)\n self._turtle.up()\n<|end_body_1|>\n", "revision_id": "ab7d24bd78719842f8790cc0e6c06dd1b327e416", "skeleton": "<|skeleton|>\nclass Circle:\n\n def __init__(self, turtle, color, x, y, radius):\n \"\"\"This constructor initializes variables and an inherited class that will be needed for each circle.\"\"\"\n <|body_0|>\n\n def draw(self):\n \"\"\"Draws the circle that is represented by the stored variables.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Circle:\n def __init__(self, turtle, color, x, y, radius):\n \"\"\"This constructor initializes variables and an inherited class that will be needed for each circle.\"\"\"\n Shape.__init__(self, turtle, color)\n self._x = x\n self._y = y\n self._radius = radius\n\n def draw(self):\n \"\"\"Draws the circle that is represented by the stored variables.\"\"\"\n circumference = 2 * 3.14159 * self._radius\n self._turtle.up()\n self._turtle.move(self._x + self._radius, self._y)\n self._turtle.setDirection(90)\n self._turtle.down()\n for i in xrange(120):\n self._turtle.move(circumference / 120)\n self._turtle.turn(3)\n self._turtle.up()\n", "source": "the_stack_v2_python_sparse", "source_path": "CS111_CS213/TreeGenerator/Lab3-Korey and Jake.py", "source_repo": "jawaff/CollegeCS", "split": "test", "star_events_count": 0} {"blob_id": "9a11548505fd45bec416ef4344362ecbd17559cd", "bodies": ["global n\nn = N\nself.q = Queue.PriorityQueue()\nself.q.put(Ran(0, n - 1))", "ran = self.q.get()\nl, r = (ran.l, ran.r)\nif l == 0:\n if l + 1 <= r:\n self.q.put(Ran(l + 1, r))\n return l\nelif r == n - 1:\n if r - 1 >= l:\n self.q.put(Ran(l, r - 1))\n return r\nll = r - l + 1\nmid = 0\nif ll % 2 == 0:\n mid = (ll - 1) / 2 + l\nelse:\n mid = ll / 2 + l\nif mid - 1 >= l:\n self.q.put(Ran(l, mid - 1))\nif mid + 1 <= r:\n self.q.put(Ran(mid + 1, r))\nreturn mid", "newq = Queue.PriorityQueue()\nL = None\nR = None\nwhile not self.q.empty():\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if r + 1 == p:\n L = l\n elif l - 1 == p:\n R = r\n else:\n newq.put(ran)\nif L != None and R != None:\n newq.put(Ran(L, R))\nelif L != None:\n newq.put(Ran(L, p))\nelif R != None:\n newq.put(Ran(p, R))\nelse:\n newq.put(Ran(p, p))\nself.q = newq"], "bodies_text": "<|body_start_0|>\n global n\n n = N\n self.q = Queue.PriorityQueue()\n self.q.put(Ran(0, n - 1))\n<|end_body_0|>\n\n<|body_start_1|>\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if l == 0:\n if l + 1 <= r:\n self.q.put(Ran(l + 1, r))\n return l\n elif r == n - 1:\n if r - 1 >= l:\n self.q.put(Ran(l, r - 1))\n return r\n ll = r - l + 1\n mid = 0\n if ll % 2 == 0:\n mid = (ll - 1) / 2 + l\n else:\n mid = ll / 2 + l\n if mid - 1 >= l:\n self.q.put(Ran(l, mid - 1))\n if mid + 1 <= r:\n self.q.put(Ran(mid + 1, r))\n return mid\n<|end_body_1|>\n\n<|body_start_2|>\n newq = Queue.PriorityQueue()\n L = None\n R = None\n while not self.q.empty():\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if r + 1 == p:\n L = l\n elif l - 1 == p:\n R = r\n else:\n newq.put(ran)\n if L != None and R != None:\n newq.put(Ran(L, R))\n elif L != None:\n newq.put(Ran(L, p))\n elif R != None:\n newq.put(Ran(p, R))\n else:\n newq.put(Ran(p, p))\n self.q = newq\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ExamRoom", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExamRoom:\n\n def __init__(self, N):\n \"\"\":type N: int\"\"\"\n <|body_0|>\n\n def seat(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def leave(self, p):\n \"\"\":type p: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global n\n n = N\n self.q = Queue.PriorityQueue()\n self.q.put(Ran(0, n - 1))\n<|end_body_0|>\n\n<|body_start_1|>\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if l == 0:\n if l + 1 <= r:\n self.q.put(Ran(l + 1, r))\n return l\n elif r == n - 1:\n if r - 1 >= l:\n self.q.put(Ran(l, r - 1))\n return r\n ll = r - l + 1\n mid = 0\n if ll % 2 == 0:\n mid = (ll - 1) / 2 + l\n else:\n mid = ll / 2 + l\n if mid - 1 >= l:\n self.q.put(Ran(l, mid - 1))\n if mid + 1 <= r:\n self.q.put(Ran(mid + 1, r))\n return mid\n<|end_body_1|>\n\n<|body_start_2|>\n newq = Queue.PriorityQueue()\n L = None\n R = None\n while not self.q.empty():\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if r + 1 == p:\n L = l\n elif l - 1 == p:\n R = r\n else:\n newq.put(ran)\n if L != None and R != None:\n newq.put(Ran(L, R))\n elif L != None:\n newq.put(Ran(L, p))\n elif R != None:\n newq.put(Ran(p, R))\n else:\n newq.put(Ran(p, p))\n self.q = newq\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000320", "length_bytes": 3191, "license_type": "no_license", "methods": [{"docstring": ":type N: int", "name": "__init__", "signature": "def __init__(self, N)"}, {"docstring": ":rtype: int", "name": "seat", "signature": "def seat(self)"}, {"docstring": ":type p: int :rtype: void", "name": "leave", "signature": "def leave(self, p)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_044313", "prompt": "Implement the Python class `ExamRoom` described below.\n\nClass description:\nImplement the ExamRoom class.\n\nMethod signatures and docstrings:\n- def __init__(self, N): :type N: int\n- def seat(self): :rtype: int\n- def leave(self, p): :type p: int :rtype: void", "prompted_full_text": "Implement the Python class `ExamRoom` described below.\n\nClass description:\nImplement the ExamRoom class.\n\nMethod signatures and docstrings:\n- def __init__(self, N): :type N: int\n- def seat(self): :rtype: int\n- def leave(self, p): :type p: int :rtype: void\n\n<|skeleton|>\nclass ExamRoom:\n\n def __init__(self, N):\n \"\"\":type N: int\"\"\"\n <|body_0|>\n\n def seat(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def leave(self, p):\n \"\"\":type p: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n global n\n n = N\n self.q = Queue.PriorityQueue()\n self.q.put(Ran(0, n - 1))\n<|end_body_0|>\n\n<|body_start_1|>\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if l == 0:\n if l + 1 <= r:\n self.q.put(Ran(l + 1, r))\n return l\n elif r == n - 1:\n if r - 1 >= l:\n self.q.put(Ran(l, r - 1))\n return r\n ll = r - l + 1\n mid = 0\n if ll % 2 == 0:\n mid = (ll - 1) / 2 + l\n else:\n mid = ll / 2 + l\n if mid - 1 >= l:\n self.q.put(Ran(l, mid - 1))\n if mid + 1 <= r:\n self.q.put(Ran(mid + 1, r))\n return mid\n<|end_body_1|>\n\n<|body_start_2|>\n newq = Queue.PriorityQueue()\n L = None\n R = None\n while not self.q.empty():\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if r + 1 == p:\n L = l\n elif l - 1 == p:\n R = r\n else:\n newq.put(ran)\n if L != None and R != None:\n newq.put(Ran(L, R))\n elif L != None:\n newq.put(Ran(L, p))\n elif R != None:\n newq.put(Ran(p, R))\n else:\n newq.put(Ran(p, p))\n self.q = newq\n<|end_body_2|>\n", "revision_id": "02ebe56cd92b9f4baeee132c5077892590018650", "skeleton": "<|skeleton|>\nclass ExamRoom:\n\n def __init__(self, N):\n \"\"\":type N: int\"\"\"\n <|body_0|>\n\n def seat(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n def leave(self, p):\n \"\"\":type p: int :rtype: void\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExamRoom:\n def __init__(self, N):\n \"\"\":type N: int\"\"\"\n global n\n n = N\n self.q = Queue.PriorityQueue()\n self.q.put(Ran(0, n - 1))\n\n def seat(self):\n \"\"\":rtype: int\"\"\"\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if l == 0:\n if l + 1 <= r:\n self.q.put(Ran(l + 1, r))\n return l\n elif r == n - 1:\n if r - 1 >= l:\n self.q.put(Ran(l, r - 1))\n return r\n ll = r - l + 1\n mid = 0\n if ll % 2 == 0:\n mid = (ll - 1) / 2 + l\n else:\n mid = ll / 2 + l\n if mid - 1 >= l:\n self.q.put(Ran(l, mid - 1))\n if mid + 1 <= r:\n self.q.put(Ran(mid + 1, r))\n return mid\n\n def leave(self, p):\n \"\"\":type p: int :rtype: void\"\"\"\n newq = Queue.PriorityQueue()\n L = None\n R = None\n while not self.q.empty():\n ran = self.q.get()\n l, r = (ran.l, ran.r)\n if r + 1 == p:\n L = l\n elif l - 1 == p:\n R = r\n else:\n newq.put(ran)\n if L != None and R != None:\n newq.put(Ran(L, R))\n elif L != None:\n newq.put(Ran(L, p))\n elif R != None:\n newq.put(Ran(p, R))\n else:\n newq.put(Ran(p, p))\n self.q = newq\n", "source": "the_stack_v2_python_sparse", "source_path": "python/leetcode.855.py", "source_repo": "CalvinNeo/LeetCode", "split": "test", "star_events_count": 3} {"blob_id": "a3a0f9b0c8bb55c12b2b8985915613ad842347cf", "bodies": ["if 'Sub Path' in data_source_dict:\n merged_path = data_source_dict['Path'] + job_id + data_source_dict['Subpath']\n del data_source_dict['Subpath']\nelse:\n merged_path = data_source_dict['Path']\nreturn merged_path", "data_source_args = {}\nservice = service_factory.create_service(data_source_dict)\npath = self._merge_path(job_id, data_source_dict)\ndata_source_args['Path'] = path\ndata_source_args['Source Name'] = data_source_dict['Source Name']\ndata_source_args['Status'] = data_source_dict['Status']\ndata_source = ds.DataSource(job_id, service, data_source_args)\nreturn data_source", "data_sources = list()\nfor job_id in job_id_list:\n data_source = self.create_data_source(job_id, data_source_dict)\n data_sources.append(data_source)\nreturn data_sources"], "bodies_text": "<|body_start_0|>\n if 'Sub Path' in data_source_dict:\n merged_path = data_source_dict['Path'] + job_id + data_source_dict['Subpath']\n del data_source_dict['Subpath']\n else:\n merged_path = data_source_dict['Path']\n return merged_path\n<|end_body_0|>\n\n<|body_start_1|>\n data_source_args = {}\n service = service_factory.create_service(data_source_dict)\n path = self._merge_path(job_id, data_source_dict)\n data_source_args['Path'] = path\n data_source_args['Source Name'] = data_source_dict['Source Name']\n data_source_args['Status'] = data_source_dict['Status']\n data_source = ds.DataSource(job_id, service, data_source_args)\n return data_source\n<|end_body_1|>\n\n<|body_start_2|>\n data_sources = list()\n for job_id in job_id_list:\n data_source = self.create_data_source(job_id, data_source_dict)\n data_sources.append(data_source)\n return data_sources\n<|end_body_2|>\n", "class_docstring": "A factory for creating data sources", "class_name": "DataSourceFactory", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DataSourceFactory:\n \"\"\"A factory for creating data sources\"\"\"\n\n def _merge_path(self, job_id, data_source_dict):\n \"\"\"Merges the path, job id, and subpath. Returns it as a string\"\"\"\n <|body_0|>\n\n def create_data_source(self, job_id, data_source_dict):\n \"\"\"Creates one data source given a job id, and a data_source_dict (from DataSources.json)\"\"\"\n <|body_1|>\n\n def create_data_source_list(self, job_id_list, data_source_dict):\n \"\"\"Creates a list of data sources given a list of ids and a data_source_dict\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'Sub Path' in data_source_dict:\n merged_path = data_source_dict['Path'] + job_id + data_source_dict['Subpath']\n del data_source_dict['Subpath']\n else:\n merged_path = data_source_dict['Path']\n return merged_path\n<|end_body_0|>\n\n<|body_start_1|>\n data_source_args = {}\n service = service_factory.create_service(data_source_dict)\n path = self._merge_path(job_id, data_source_dict)\n data_source_args['Path'] = path\n data_source_args['Source Name'] = data_source_dict['Source Name']\n data_source_args['Status'] = data_source_dict['Status']\n data_source = ds.DataSource(job_id, service, data_source_args)\n return data_source\n<|end_body_1|>\n\n<|body_start_2|>\n data_sources = list()\n for job_id in job_id_list:\n data_source = self.create_data_source(job_id, data_source_dict)\n data_sources.append(data_source)\n return data_sources\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000321", "length_bytes": 1577, "license_type": "no_license", "methods": [{"docstring": "Merges the path, job id, and subpath. Returns it as a string", "name": "_merge_path", "signature": "def _merge_path(self, job_id, data_source_dict)"}, {"docstring": "Creates one data source given a job id, and a data_source_dict (from DataSources.json)", "name": "create_data_source", "signature": "def create_data_source(self, job_id, data_source_dict)"}, {"docstring": "Creates a list of data sources given a list of ids and a data_source_dict", "name": "create_data_source_list", "signature": "def create_data_source_list(self, job_id_list, data_source_dict)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_008392", "prompt": "Implement the Python class `DataSourceFactory` described below.\n\nClass description:\nA factory for creating data sources\n\nMethod signatures and docstrings:\n- def _merge_path(self, job_id, data_source_dict): Merges the path, job id, and subpath. Returns it as a string\n- def create_data_source(self, job_id, data_source_dict): Creates one data source given a job id, and a data_source_dict (from DataSources.json)\n- def create_data_source_list(self, job_id_list, data_source_dict): Creates a list of data sources given a list of ids and a data_source_dict", "prompted_full_text": "Implement the Python class `DataSourceFactory` described below.\n\nClass description:\nA factory for creating data sources\n\nMethod signatures and docstrings:\n- def _merge_path(self, job_id, data_source_dict): Merges the path, job id, and subpath. Returns it as a string\n- def create_data_source(self, job_id, data_source_dict): Creates one data source given a job id, and a data_source_dict (from DataSources.json)\n- def create_data_source_list(self, job_id_list, data_source_dict): Creates a list of data sources given a list of ids and a data_source_dict\n\n<|skeleton|>\nclass DataSourceFactory:\n \"\"\"A factory for creating data sources\"\"\"\n\n def _merge_path(self, job_id, data_source_dict):\n \"\"\"Merges the path, job id, and subpath. Returns it as a string\"\"\"\n <|body_0|>\n\n def create_data_source(self, job_id, data_source_dict):\n \"\"\"Creates one data source given a job id, and a data_source_dict (from DataSources.json)\"\"\"\n <|body_1|>\n\n def create_data_source_list(self, job_id_list, data_source_dict):\n \"\"\"Creates a list of data sources given a list of ids and a data_source_dict\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if 'Sub Path' in data_source_dict:\n merged_path = data_source_dict['Path'] + job_id + data_source_dict['Subpath']\n del data_source_dict['Subpath']\n else:\n merged_path = data_source_dict['Path']\n return merged_path\n<|end_body_0|>\n\n<|body_start_1|>\n data_source_args = {}\n service = service_factory.create_service(data_source_dict)\n path = self._merge_path(job_id, data_source_dict)\n data_source_args['Path'] = path\n data_source_args['Source Name'] = data_source_dict['Source Name']\n data_source_args['Status'] = data_source_dict['Status']\n data_source = ds.DataSource(job_id, service, data_source_args)\n return data_source\n<|end_body_1|>\n\n<|body_start_2|>\n data_sources = list()\n for job_id in job_id_list:\n data_source = self.create_data_source(job_id, data_source_dict)\n data_sources.append(data_source)\n return data_sources\n<|end_body_2|>\n", "revision_id": "c69fe121799d72d5239d2da59577e9c1b7a9c51c", "skeleton": "<|skeleton|>\nclass DataSourceFactory:\n \"\"\"A factory for creating data sources\"\"\"\n\n def _merge_path(self, job_id, data_source_dict):\n \"\"\"Merges the path, job id, and subpath. Returns it as a string\"\"\"\n <|body_0|>\n\n def create_data_source(self, job_id, data_source_dict):\n \"\"\"Creates one data source given a job id, and a data_source_dict (from DataSources.json)\"\"\"\n <|body_1|>\n\n def create_data_source_list(self, job_id_list, data_source_dict):\n \"\"\"Creates a list of data sources given a list of ids and a data_source_dict\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DataSourceFactory:\n \"\"\"A factory for creating data sources\"\"\"\n\n def _merge_path(self, job_id, data_source_dict):\n \"\"\"Merges the path, job id, and subpath. Returns it as a string\"\"\"\n if 'Sub Path' in data_source_dict:\n merged_path = data_source_dict['Path'] + job_id + data_source_dict['Subpath']\n del data_source_dict['Subpath']\n else:\n merged_path = data_source_dict['Path']\n return merged_path\n\n def create_data_source(self, job_id, data_source_dict):\n \"\"\"Creates one data source given a job id, and a data_source_dict (from DataSources.json)\"\"\"\n data_source_args = {}\n service = service_factory.create_service(data_source_dict)\n path = self._merge_path(job_id, data_source_dict)\n data_source_args['Path'] = path\n data_source_args['Source Name'] = data_source_dict['Source Name']\n data_source_args['Status'] = data_source_dict['Status']\n data_source = ds.DataSource(job_id, service, data_source_args)\n return data_source\n\n def create_data_source_list(self, job_id_list, data_source_dict):\n \"\"\"Creates a list of data sources given a list of ids and a data_source_dict\"\"\"\n data_sources = list()\n for job_id in job_id_list:\n data_source = self.create_data_source(job_id, data_source_dict)\n data_sources.append(data_source)\n return data_sources\n", "source": "the_stack_v2_python_sparse", "source_path": "Factories/data_source_factory.py", "source_repo": "McFunston/PythonScheduleTools", "split": "test", "star_events_count": 1} {"blob_id": "92d64889fbd888a966aa6ba04dddd59272162435", "bodies": ["if plan is None:\n plan = self.migration_plan(targets)\nfull_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\nstate = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\nself.check_replacements()\nreturn state", "migrations_to_run = {m[0] for m in plan}\nstate = ProjectState(real_apps=list(self.loader.unmigrated_apps))\nfor migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\nreturn state"], "bodies_text": "<|body_start_0|>\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n<|end_body_0|>\n\n<|body_start_1|>\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n<|end_body_1|>\n", "class_docstring": "", "class_name": "BackupMigrationExecutor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BackupMigrationExecutor:\n\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n <|body_0|>\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n<|end_body_0|>\n\n<|body_start_1|>\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000322", "length_bytes": 2168, "license_type": "no_license", "methods": [{"docstring": "Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.", "name": "migrate", "signature": "def migrate(self, targets, plan=None, fake=False, fake_initial=False)"}, {"docstring": "Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.", "name": "_migrate_all_forwards", "signature": "def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_029451", "prompt": "Implement the Python class `BackupMigrationExecutor` described below.\n\nClass description:\nImplement the BackupMigrationExecutor class.\n\nMethod signatures and docstrings:\n- def migrate(self, targets, plan=None, fake=False, fake_initial=False): Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\n- def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial): Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.", "prompted_full_text": "Implement the Python class `BackupMigrationExecutor` described below.\n\nClass description:\nImplement the BackupMigrationExecutor class.\n\nMethod signatures and docstrings:\n- def migrate(self, targets, plan=None, fake=False, fake_initial=False): Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\n- def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial): Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\n\n<|skeleton|>\nclass BackupMigrationExecutor:\n\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n <|body_0|>\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n<|end_body_0|>\n\n<|body_start_1|>\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n<|end_body_1|>\n", "revision_id": "879111874d1ef70418b4890cf970720b0a2be4d8", "skeleton": "<|skeleton|>\nclass BackupMigrationExecutor:\n\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n <|body_0|>\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BackupMigrationExecutor:\n def migrate(self, targets, plan=None, fake=False, fake_initial=False):\n \"\"\"Migrates the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations.\"\"\"\n if plan is None:\n plan = self.migration_plan(targets)\n full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True)\n state = self._migrate_all_forwards(plan, full_plan, fake=fake, fake_initial=fake_initial)\n self.check_replacements()\n return state\n\n def _migrate_all_forwards(self, plan, full_plan, fake, fake_initial):\n \"\"\"Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan.\"\"\"\n migrations_to_run = {m[0] for m in plan}\n state = ProjectState(real_apps=list(self.loader.unmigrated_apps))\n for migration, _ in full_plan:\n if not migrations_to_run:\n break\n if migration in migrations_to_run:\n if 'apps' not in state.__dict__:\n if self.progress_callback:\n self.progress_callback('render_start')\n state.apps\n if self.progress_callback:\n self.progress_callback('render_success')\n state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial)\n migrations_to_run.remove(migration)\n return state\n", "source": "the_stack_v2_python_sparse", "source_path": "apps/backups/executor.py", "source_repo": "faierbol/syncano-platform", "split": "test", "star_events_count": 0} {"blob_id": "5f241be2be2171ebbe09cbc56a540e5e72f2c6b0", "bodies": ["rank = {}\nparent = {}\nfor u in elements:\n rank[u] = 0\n parent[u] = u\nself.rank = rank\nself.parent = parent", "parent = self.parent\npath = []\nappend = path.append\nwhile x != parent[x]:\n append(x)\n x = parent[x]\nfor v in path:\n parent[v] = x\nreturn x", "parent = self.parent\nrank = self.rank\npx = parent[x]\npy = parent[y]\nif rank[px] > rank[py]:\n x, y = (y, x)\nif rank[px] == rank[py]:\n rank[py] += 1\nparent[px] = py"], "bodies_text": "<|body_start_0|>\n rank = {}\n parent = {}\n for u in elements:\n rank[u] = 0\n parent[u] = u\n self.rank = rank\n self.parent = parent\n<|end_body_0|>\n\n<|body_start_1|>\n parent = self.parent\n path = []\n append = path.append\n while x != parent[x]:\n append(x)\n x = parent[x]\n for v in path:\n parent[v] = x\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n parent = self.parent\n rank = self.rank\n px = parent[x]\n py = parent[y]\n if rank[px] > rank[py]:\n x, y = (y, x)\n if rank[px] == rank[py]:\n rank[py] += 1\n parent[px] = py\n<|end_body_2|>\n", "class_docstring": "Implementation of a disjoint set data structure. This data structure solves the problem of maintaining a collection of disjoint sets under the operation of union. To determine if two elements live in the same set, we implement a find operation which returns the name of the set containing the given element. Our implementation follows Chapter 2 of \"Data Structures and Network Algorithms\" by Tarjan. We implement both the union by rank and the path compression heuristics for fast amortized runtime.", "class_name": "UnionFind", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UnionFind:\n \"\"\"Implementation of a disjoint set data structure. This data structure solves the problem of maintaining a collection of disjoint sets under the operation of union. To determine if two elements live in the same set, we implement a find operation which returns the name of the set containing the given element. Our implementation follows Chapter 2 of \"Data Structures and Network Algorithms\" by Tarjan. We implement both the union by rank and the path compression heuristics for fast amortized runtime.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize a new UnionFind object containing the given elements. Given a list of elements, we initialize the data structure by putting each element into its own set. We represent each set by a rooted tree. The nodes of the tree are the elements of the set, and the representative of each set is the tree root. For each node x, we keep track of its parent node. By convention, we make the root element its own parent.\"\"\"\n <|body_0|>\n\n def find(self, x):\n \"\"\"Returns the name of the set containing the element x. Here, we use the path compression heuristic. This changes the structure of the tree during a find by moving nodes closer to the root. When carrying out find(x), after locating the root r of the tree containing x, we make every node on the path from x to r point directly to r. This heuristic increases the time of a single find by a constant factor, but saves enough time in later finds to more than pay for itself.\"\"\"\n <|body_1|>\n\n def union(self, x, y):\n \"\"\"Combines the sets named x and y. Here, we use the union by rank heuristic to keep the trees shallow. With each node x, we store a nonnegative integer rank that is an upper bound on the height of x.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rank = {}\n parent = {}\n for u in elements:\n rank[u] = 0\n parent[u] = u\n self.rank = rank\n self.parent = parent\n<|end_body_0|>\n\n<|body_start_1|>\n parent = self.parent\n path = []\n append = path.append\n while x != parent[x]:\n append(x)\n x = parent[x]\n for v in path:\n parent[v] = x\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n parent = self.parent\n rank = self.rank\n px = parent[x]\n py = parent[y]\n if rank[px] > rank[py]:\n x, y = (y, x)\n if rank[px] == rank[py]:\n rank[py] += 1\n parent[px] = py\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000323", "length_bytes": 4004, "license_type": "no_license", "methods": [{"docstring": "Initialize a new UnionFind object containing the given elements. Given a list of elements, we initialize the data structure by putting each element into its own set. We represent each set by a rooted tree. The nodes of the tree are the elements of the set, and the representative of each set is the tree root. For each node x, we keep track of its parent node. By convention, we make the root element its own parent.", "name": "__init__", "signature": "def __init__(self, elements)"}, {"docstring": "Returns the name of the set containing the element x. Here, we use the path compression heuristic. This changes the structure of the tree during a find by moving nodes closer to the root. When carrying out find(x), after locating the root r of the tree containing x, we make every node on the path from x to r point directly to r. This heuristic increases the time of a single find by a constant factor, but saves enough time in later finds to more than pay for itself.", "name": "find", "signature": "def find(self, x)"}, {"docstring": "Combines the sets named x and y. Here, we use the union by rank heuristic to keep the trees shallow. With each node x, we store a nonnegative integer rank that is an upper bound on the height of x.", "name": "union", "signature": "def union(self, x, y)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_032493", "prompt": "Implement the Python class `UnionFind` described below.\n\nClass description:\nImplementation of a disjoint set data structure. This data structure solves the problem of maintaining a collection of disjoint sets under the operation of union. To determine if two elements live in the same set, we implement a find operation which returns the name of the set containing the given element. Our implementation follows Chapter 2 of \"Data Structures and Network Algorithms\" by Tarjan. We implement both the union by rank and the path compression heuristics for fast amortized runtime.\n\nMethod signatures and docstrings:\n- def __init__(self, elements): Initialize a new UnionFind object containing the given elements. Given a list of elements, we initialize the data structure by putting each element into its own set. We represent each set by a rooted tree. The nodes of the tree are the elements of the set, and the representative of each set is the tree root. For each node x, we keep track of its parent node. By convention, we make the root element its own parent.\n- def find(self, x): Returns the name of the set containing the element x. Here, we use the path compression heuristic. This changes the structure of the tree during a find by moving nodes closer to the root. When carrying out find(x), after locating the root r of the tree containing x, we make every node on the path from x to r point directly to r. This heuristic increases the time of a single find by a constant factor, but saves enough time in later finds to more than pay for itself.\n- def union(self, x, y): Combines the sets named x and y. Here, we use the union by rank heuristic to keep the trees shallow. With each node x, we store a nonnegative integer rank that is an upper bound on the height of x.", "prompted_full_text": "Implement the Python class `UnionFind` described below.\n\nClass description:\nImplementation of a disjoint set data structure. This data structure solves the problem of maintaining a collection of disjoint sets under the operation of union. To determine if two elements live in the same set, we implement a find operation which returns the name of the set containing the given element. Our implementation follows Chapter 2 of \"Data Structures and Network Algorithms\" by Tarjan. We implement both the union by rank and the path compression heuristics for fast amortized runtime.\n\nMethod signatures and docstrings:\n- def __init__(self, elements): Initialize a new UnionFind object containing the given elements. Given a list of elements, we initialize the data structure by putting each element into its own set. We represent each set by a rooted tree. The nodes of the tree are the elements of the set, and the representative of each set is the tree root. For each node x, we keep track of its parent node. By convention, we make the root element its own parent.\n- def find(self, x): Returns the name of the set containing the element x. Here, we use the path compression heuristic. This changes the structure of the tree during a find by moving nodes closer to the root. When carrying out find(x), after locating the root r of the tree containing x, we make every node on the path from x to r point directly to r. This heuristic increases the time of a single find by a constant factor, but saves enough time in later finds to more than pay for itself.\n- def union(self, x, y): Combines the sets named x and y. Here, we use the union by rank heuristic to keep the trees shallow. With each node x, we store a nonnegative integer rank that is an upper bound on the height of x.\n\n<|skeleton|>\nclass UnionFind:\n \"\"\"Implementation of a disjoint set data structure. This data structure solves the problem of maintaining a collection of disjoint sets under the operation of union. To determine if two elements live in the same set, we implement a find operation which returns the name of the set containing the given element. Our implementation follows Chapter 2 of \"Data Structures and Network Algorithms\" by Tarjan. We implement both the union by rank and the path compression heuristics for fast amortized runtime.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize a new UnionFind object containing the given elements. Given a list of elements, we initialize the data structure by putting each element into its own set. We represent each set by a rooted tree. The nodes of the tree are the elements of the set, and the representative of each set is the tree root. For each node x, we keep track of its parent node. By convention, we make the root element its own parent.\"\"\"\n <|body_0|>\n\n def find(self, x):\n \"\"\"Returns the name of the set containing the element x. Here, we use the path compression heuristic. This changes the structure of the tree during a find by moving nodes closer to the root. When carrying out find(x), after locating the root r of the tree containing x, we make every node on the path from x to r point directly to r. This heuristic increases the time of a single find by a constant factor, but saves enough time in later finds to more than pay for itself.\"\"\"\n <|body_1|>\n\n def union(self, x, y):\n \"\"\"Combines the sets named x and y. Here, we use the union by rank heuristic to keep the trees shallow. With each node x, we store a nonnegative integer rank that is an upper bound on the height of x.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n rank = {}\n parent = {}\n for u in elements:\n rank[u] = 0\n parent[u] = u\n self.rank = rank\n self.parent = parent\n<|end_body_0|>\n\n<|body_start_1|>\n parent = self.parent\n path = []\n append = path.append\n while x != parent[x]:\n append(x)\n x = parent[x]\n for v in path:\n parent[v] = x\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n parent = self.parent\n rank = self.rank\n px = parent[x]\n py = parent[y]\n if rank[px] > rank[py]:\n x, y = (y, x)\n if rank[px] == rank[py]:\n rank[py] += 1\n parent[px] = py\n<|end_body_2|>\n", "revision_id": "f03e2c7c4931221e0885f29b36d8c4385254ae8d", "skeleton": "<|skeleton|>\nclass UnionFind:\n \"\"\"Implementation of a disjoint set data structure. This data structure solves the problem of maintaining a collection of disjoint sets under the operation of union. To determine if two elements live in the same set, we implement a find operation which returns the name of the set containing the given element. Our implementation follows Chapter 2 of \"Data Structures and Network Algorithms\" by Tarjan. We implement both the union by rank and the path compression heuristics for fast amortized runtime.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize a new UnionFind object containing the given elements. Given a list of elements, we initialize the data structure by putting each element into its own set. We represent each set by a rooted tree. The nodes of the tree are the elements of the set, and the representative of each set is the tree root. For each node x, we keep track of its parent node. By convention, we make the root element its own parent.\"\"\"\n <|body_0|>\n\n def find(self, x):\n \"\"\"Returns the name of the set containing the element x. Here, we use the path compression heuristic. This changes the structure of the tree during a find by moving nodes closer to the root. When carrying out find(x), after locating the root r of the tree containing x, we make every node on the path from x to r point directly to r. This heuristic increases the time of a single find by a constant factor, but saves enough time in later finds to more than pay for itself.\"\"\"\n <|body_1|>\n\n def union(self, x, y):\n \"\"\"Combines the sets named x and y. Here, we use the union by rank heuristic to keep the trees shallow. With each node x, we store a nonnegative integer rank that is an upper bound on the height of x.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UnionFind:\n \"\"\"Implementation of a disjoint set data structure. This data structure solves the problem of maintaining a collection of disjoint sets under the operation of union. To determine if two elements live in the same set, we implement a find operation which returns the name of the set containing the given element. Our implementation follows Chapter 2 of \"Data Structures and Network Algorithms\" by Tarjan. We implement both the union by rank and the path compression heuristics for fast amortized runtime.\"\"\"\n\n def __init__(self, elements):\n \"\"\"Initialize a new UnionFind object containing the given elements. Given a list of elements, we initialize the data structure by putting each element into its own set. We represent each set by a rooted tree. The nodes of the tree are the elements of the set, and the representative of each set is the tree root. For each node x, we keep track of its parent node. By convention, we make the root element its own parent.\"\"\"\n rank = {}\n parent = {}\n for u in elements:\n rank[u] = 0\n parent[u] = u\n self.rank = rank\n self.parent = parent\n\n def find(self, x):\n \"\"\"Returns the name of the set containing the element x. Here, we use the path compression heuristic. This changes the structure of the tree during a find by moving nodes closer to the root. When carrying out find(x), after locating the root r of the tree containing x, we make every node on the path from x to r point directly to r. This heuristic increases the time of a single find by a constant factor, but saves enough time in later finds to more than pay for itself.\"\"\"\n parent = self.parent\n path = []\n append = path.append\n while x != parent[x]:\n append(x)\n x = parent[x]\n for v in path:\n parent[v] = x\n return x\n\n def union(self, x, y):\n \"\"\"Combines the sets named x and y. Here, we use the union by rank heuristic to keep the trees shallow. With each node x, we store a nonnegative integer rank that is an upper bound on the height of x.\"\"\"\n parent = self.parent\n rank = self.rank\n px = parent[x]\n py = parent[y]\n if rank[px] > rank[py]:\n x, y = (y, x)\n if rank[px] == rank[py]:\n rank[py] += 1\n parent[px] = py\n", "source": "the_stack_v2_python_sparse", "source_path": "rosemary/data_structures/unionfind.py", "source_repo": "fagan2888/rosemary", "split": "test", "star_events_count": 0} {"blob_id": "6b2d1c3cdbbaec9e7c21a44fddb5607987482fbe", "bodies": ["url = f'{COZA_HOST}/exchanges'\nexchange_li = _request(url, 'GET')\nexchange_info = {}\nfor exchange in exchange_li['results']:\n currency_li = []\n if exchange['name'] not in exchange_info.keys():\n for currency in exchange['currencies']:\n currency_li.append(currency['label'])\n exchange_info[exchange['name']] = [currency_li, exchange['intervals'], exchange['feerate']]\nreturn exchange_info", "url = f'{COZA_HOST}/exchanges/{exchange.lower()}/ticker'\ndata = _request(url, 'GET', params={'currency': currency.upper()})\nreturn data", "url = f'{COZA_HOST}/exchanges/{exchange.lower()}/orderbook'\ndata = _request(url, 'GET', params={'currency': currency.upper()})\nreturn data", "url = f'{COZA_HOST}/exchanges/upbit/market'\nresp = _request(url, 'GET')\ndata = {}\ntry:\n for k in resp.keys():\n if k == 'remain_req':\n data[k] = resp[k]\n else:\n data[k] = pd.DataFrame(resp[k])\nexcept Exception as e:\n print(e)\nreturn data"], "bodies_text": "<|body_start_0|>\n url = f'{COZA_HOST}/exchanges'\n exchange_li = _request(url, 'GET')\n exchange_info = {}\n for exchange in exchange_li['results']:\n currency_li = []\n if exchange['name'] not in exchange_info.keys():\n for currency in exchange['currencies']:\n currency_li.append(currency['label'])\n exchange_info[exchange['name']] = [currency_li, exchange['intervals'], exchange['feerate']]\n return exchange_info\n<|end_body_0|>\n\n<|body_start_1|>\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/ticker'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/orderbook'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n url = f'{COZA_HOST}/exchanges/upbit/market'\n resp = _request(url, 'GET')\n data = {}\n try:\n for k in resp.keys():\n if k == 'remain_req':\n data[k] = resp[k]\n else:\n data[k] = pd.DataFrame(resp[k])\n except Exception as e:\n print(e)\n return data\n<|end_body_3|>\n", "class_docstring": "", "class_name": "ExchangeApi", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ExchangeApi:\n\n def get_exchange_info(cls):\n \"\"\"Get COZA Service exchange and curreny info Args: None Returns: exchange_info(dict)\"\"\"\n <|body_0|>\n\n def get_ticker(cls, exchange, currency):\n \"\"\"Get current ticker Args: exchange(str): Cryptocurrency exchagne name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n <|body_1|>\n\n def get_orderbook(cls, exchange, currency):\n \"\"\"Get orderbook current time Args: exchange(str): Cryptocurrency exchange name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n <|body_2|>\n\n def get_markets(cls):\n \"\"\"Get markets upbit exchange Returns: data(dict)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = f'{COZA_HOST}/exchanges'\n exchange_li = _request(url, 'GET')\n exchange_info = {}\n for exchange in exchange_li['results']:\n currency_li = []\n if exchange['name'] not in exchange_info.keys():\n for currency in exchange['currencies']:\n currency_li.append(currency['label'])\n exchange_info[exchange['name']] = [currency_li, exchange['intervals'], exchange['feerate']]\n return exchange_info\n<|end_body_0|>\n\n<|body_start_1|>\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/ticker'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/orderbook'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n url = f'{COZA_HOST}/exchanges/upbit/market'\n resp = _request(url, 'GET')\n data = {}\n try:\n for k in resp.keys():\n if k == 'remain_req':\n data[k] = resp[k]\n else:\n data[k] = pd.DataFrame(resp[k])\n except Exception as e:\n print(e)\n return data\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000324", "length_bytes": 6257, "license_type": "permissive", "methods": [{"docstring": "Get COZA Service exchange and curreny info Args: None Returns: exchange_info(dict)", "name": "get_exchange_info", "signature": "def get_exchange_info(cls)"}, {"docstring": "Get current ticker Args: exchange(str): Cryptocurrency exchagne name currency(str): Cryptocurrency name Returns: data(dict)", "name": "get_ticker", "signature": "def get_ticker(cls, exchange, currency)"}, {"docstring": "Get orderbook current time Args: exchange(str): Cryptocurrency exchange name currency(str): Cryptocurrency name Returns: data(dict)", "name": "get_orderbook", "signature": "def get_orderbook(cls, exchange, currency)"}, {"docstring": "Get markets upbit exchange Returns: data(dict)", "name": "get_markets", "signature": "def get_markets(cls)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_015860", "prompt": "Implement the Python class `ExchangeApi` described below.\n\nClass description:\nImplement the ExchangeApi class.\n\nMethod signatures and docstrings:\n- def get_exchange_info(cls): Get COZA Service exchange and curreny info Args: None Returns: exchange_info(dict)\n- def get_ticker(cls, exchange, currency): Get current ticker Args: exchange(str): Cryptocurrency exchagne name currency(str): Cryptocurrency name Returns: data(dict)\n- def get_orderbook(cls, exchange, currency): Get orderbook current time Args: exchange(str): Cryptocurrency exchange name currency(str): Cryptocurrency name Returns: data(dict)\n- def get_markets(cls): Get markets upbit exchange Returns: data(dict)", "prompted_full_text": "Implement the Python class `ExchangeApi` described below.\n\nClass description:\nImplement the ExchangeApi class.\n\nMethod signatures and docstrings:\n- def get_exchange_info(cls): Get COZA Service exchange and curreny info Args: None Returns: exchange_info(dict)\n- def get_ticker(cls, exchange, currency): Get current ticker Args: exchange(str): Cryptocurrency exchagne name currency(str): Cryptocurrency name Returns: data(dict)\n- def get_orderbook(cls, exchange, currency): Get orderbook current time Args: exchange(str): Cryptocurrency exchange name currency(str): Cryptocurrency name Returns: data(dict)\n- def get_markets(cls): Get markets upbit exchange Returns: data(dict)\n\n<|skeleton|>\nclass ExchangeApi:\n\n def get_exchange_info(cls):\n \"\"\"Get COZA Service exchange and curreny info Args: None Returns: exchange_info(dict)\"\"\"\n <|body_0|>\n\n def get_ticker(cls, exchange, currency):\n \"\"\"Get current ticker Args: exchange(str): Cryptocurrency exchagne name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n <|body_1|>\n\n def get_orderbook(cls, exchange, currency):\n \"\"\"Get orderbook current time Args: exchange(str): Cryptocurrency exchange name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n <|body_2|>\n\n def get_markets(cls):\n \"\"\"Get markets upbit exchange Returns: data(dict)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = f'{COZA_HOST}/exchanges'\n exchange_li = _request(url, 'GET')\n exchange_info = {}\n for exchange in exchange_li['results']:\n currency_li = []\n if exchange['name'] not in exchange_info.keys():\n for currency in exchange['currencies']:\n currency_li.append(currency['label'])\n exchange_info[exchange['name']] = [currency_li, exchange['intervals'], exchange['feerate']]\n return exchange_info\n<|end_body_0|>\n\n<|body_start_1|>\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/ticker'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n<|end_body_1|>\n\n<|body_start_2|>\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/orderbook'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n<|end_body_2|>\n\n<|body_start_3|>\n url = f'{COZA_HOST}/exchanges/upbit/market'\n resp = _request(url, 'GET')\n data = {}\n try:\n for k in resp.keys():\n if k == 'remain_req':\n data[k] = resp[k]\n else:\n data[k] = pd.DataFrame(resp[k])\n except Exception as e:\n print(e)\n return data\n<|end_body_3|>\n", "revision_id": "1068c33fdbd55bc7e8e0968fcee62ef0786ff323", "skeleton": "<|skeleton|>\nclass ExchangeApi:\n\n def get_exchange_info(cls):\n \"\"\"Get COZA Service exchange and curreny info Args: None Returns: exchange_info(dict)\"\"\"\n <|body_0|>\n\n def get_ticker(cls, exchange, currency):\n \"\"\"Get current ticker Args: exchange(str): Cryptocurrency exchagne name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n <|body_1|>\n\n def get_orderbook(cls, exchange, currency):\n \"\"\"Get orderbook current time Args: exchange(str): Cryptocurrency exchange name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n <|body_2|>\n\n def get_markets(cls):\n \"\"\"Get markets upbit exchange Returns: data(dict)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ExchangeApi:\n def get_exchange_info(cls):\n \"\"\"Get COZA Service exchange and curreny info Args: None Returns: exchange_info(dict)\"\"\"\n url = f'{COZA_HOST}/exchanges'\n exchange_li = _request(url, 'GET')\n exchange_info = {}\n for exchange in exchange_li['results']:\n currency_li = []\n if exchange['name'] not in exchange_info.keys():\n for currency in exchange['currencies']:\n currency_li.append(currency['label'])\n exchange_info[exchange['name']] = [currency_li, exchange['intervals'], exchange['feerate']]\n return exchange_info\n\n def get_ticker(cls, exchange, currency):\n \"\"\"Get current ticker Args: exchange(str): Cryptocurrency exchagne name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/ticker'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n\n def get_orderbook(cls, exchange, currency):\n \"\"\"Get orderbook current time Args: exchange(str): Cryptocurrency exchange name currency(str): Cryptocurrency name Returns: data(dict)\"\"\"\n url = f'{COZA_HOST}/exchanges/{exchange.lower()}/orderbook'\n data = _request(url, 'GET', params={'currency': currency.upper()})\n return data\n\n def get_markets(cls):\n \"\"\"Get markets upbit exchange Returns: data(dict)\"\"\"\n url = f'{COZA_HOST}/exchanges/upbit/market'\n resp = _request(url, 'GET')\n data = {}\n try:\n for k in resp.keys():\n if k == 'remain_req':\n data[k] = resp[k]\n else:\n data[k] = pd.DataFrame(resp[k])\n except Exception as e:\n print(e)\n return data\n", "source": "the_stack_v2_python_sparse", "source_path": "coza/api/public.py", "source_repo": "Derek-tjhwang/CATS-LAB", "split": "test", "star_events_count": 2} {"blob_id": "bb9c5821c0f60cd8624552faeda136ca830752eb", "bodies": ["self.ctype = 'list_extract'\n'str: type of data processor'\nself.lam = lam\nself.dtype = list\n'type(list): the datatype performing `lam` should yield'", "try:\n res = self.dtype(self.lam(lam_arg))\nexcept Exception as e:\n logger = get_logger()\n logger.exception(e)\n res = None\nreturn res"], "bodies_text": "<|body_start_0|>\n self.ctype = 'list_extract'\n 'str: type of data processor'\n self.lam = lam\n self.dtype = list\n 'type(list): the datatype performing `lam` should yield'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n res = self.dtype(self.lam(lam_arg))\n except Exception as e:\n logger = get_logger()\n logger.exception(e)\n res = None\n return res\n<|end_body_1|>\n", "class_docstring": "Used to extract a list of dictionaries that will each represent a single row in a database", "class_name": "ListExtract", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ListExtract:\n \"\"\"Used to extract a list of dictionaries that will each represent a single row in a database\"\"\"\n\n def __init__(self, lam: Callable[[dict], list]):\n \"\"\"Creates a ListExtract object Args: lam: Anonymous function that attempts to extract a list of data that will become rows in a DataFrame\"\"\"\n <|body_0|>\n\n def lam_wrap(self, lam_arg: dict) -> Union[List[dict], None]:\n \"\"\"Overrides super class method Workflow: 1. Attempt to perform the ``lam`` operation on the incoming data 2. Attempt to cast the result ``lam`` operation to a list * If an exception occurs, return None 3. Return the list of data Args: lam_arg: A dictionary containing a list of dictionaries that will become the rows of a DataFrame Returns: A list of dictionaries that will become the rows of a DataFrame if successful otherwise None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ctype = 'list_extract'\n 'str: type of data processor'\n self.lam = lam\n self.dtype = list\n 'type(list): the datatype performing `lam` should yield'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n res = self.dtype(self.lam(lam_arg))\n except Exception as e:\n logger = get_logger()\n logger.exception(e)\n res = None\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000325", "length_bytes": 2367, "license_type": "permissive", "methods": [{"docstring": "Creates a ListExtract object Args: lam: Anonymous function that attempts to extract a list of data that will become rows in a DataFrame", "name": "__init__", "signature": "def __init__(self, lam: Callable[[dict], list])"}, {"docstring": "Overrides super class method Workflow: 1. Attempt to perform the ``lam`` operation on the incoming data 2. Attempt to cast the result ``lam`` operation to a list * If an exception occurs, return None 3. Return the list of data Args: lam_arg: A dictionary containing a list of dictionaries that will become the rows of a DataFrame Returns: A list of dictionaries that will become the rows of a DataFrame if successful otherwise None", "name": "lam_wrap", "signature": "def lam_wrap(self, lam_arg: dict) -> Union[List[dict], None]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001371", "prompt": "Implement the Python class `ListExtract` described below.\n\nClass description:\nUsed to extract a list of dictionaries that will each represent a single row in a database\n\nMethod signatures and docstrings:\n- def __init__(self, lam: Callable[[dict], list]): Creates a ListExtract object Args: lam: Anonymous function that attempts to extract a list of data that will become rows in a DataFrame\n- def lam_wrap(self, lam_arg: dict) -> Union[List[dict], None]: Overrides super class method Workflow: 1. Attempt to perform the ``lam`` operation on the incoming data 2. Attempt to cast the result ``lam`` operation to a list * If an exception occurs, return None 3. Return the list of data Args: lam_arg: A dictionary containing a list of dictionaries that will become the rows of a DataFrame Returns: A list of dictionaries that will become the rows of a DataFrame if successful otherwise None", "prompted_full_text": "Implement the Python class `ListExtract` described below.\n\nClass description:\nUsed to extract a list of dictionaries that will each represent a single row in a database\n\nMethod signatures and docstrings:\n- def __init__(self, lam: Callable[[dict], list]): Creates a ListExtract object Args: lam: Anonymous function that attempts to extract a list of data that will become rows in a DataFrame\n- def lam_wrap(self, lam_arg: dict) -> Union[List[dict], None]: Overrides super class method Workflow: 1. Attempt to perform the ``lam`` operation on the incoming data 2. Attempt to cast the result ``lam`` operation to a list * If an exception occurs, return None 3. Return the list of data Args: lam_arg: A dictionary containing a list of dictionaries that will become the rows of a DataFrame Returns: A list of dictionaries that will become the rows of a DataFrame if successful otherwise None\n\n<|skeleton|>\nclass ListExtract:\n \"\"\"Used to extract a list of dictionaries that will each represent a single row in a database\"\"\"\n\n def __init__(self, lam: Callable[[dict], list]):\n \"\"\"Creates a ListExtract object Args: lam: Anonymous function that attempts to extract a list of data that will become rows in a DataFrame\"\"\"\n <|body_0|>\n\n def lam_wrap(self, lam_arg: dict) -> Union[List[dict], None]:\n \"\"\"Overrides super class method Workflow: 1. Attempt to perform the ``lam`` operation on the incoming data 2. Attempt to cast the result ``lam`` operation to a list * If an exception occurs, return None 3. Return the list of data Args: lam_arg: A dictionary containing a list of dictionaries that will become the rows of a DataFrame Returns: A list of dictionaries that will become the rows of a DataFrame if successful otherwise None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.ctype = 'list_extract'\n 'str: type of data processor'\n self.lam = lam\n self.dtype = list\n 'type(list): the datatype performing `lam` should yield'\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n res = self.dtype(self.lam(lam_arg))\n except Exception as e:\n logger = get_logger()\n logger.exception(e)\n res = None\n return res\n<|end_body_1|>\n", "revision_id": "8c8b14280441f5153ff146c23359a0eb91022ddb", "skeleton": "<|skeleton|>\nclass ListExtract:\n \"\"\"Used to extract a list of dictionaries that will each represent a single row in a database\"\"\"\n\n def __init__(self, lam: Callable[[dict], list]):\n \"\"\"Creates a ListExtract object Args: lam: Anonymous function that attempts to extract a list of data that will become rows in a DataFrame\"\"\"\n <|body_0|>\n\n def lam_wrap(self, lam_arg: dict) -> Union[List[dict], None]:\n \"\"\"Overrides super class method Workflow: 1. Attempt to perform the ``lam`` operation on the incoming data 2. Attempt to cast the result ``lam`` operation to a list * If an exception occurs, return None 3. Return the list of data Args: lam_arg: A dictionary containing a list of dictionaries that will become the rows of a DataFrame Returns: A list of dictionaries that will become the rows of a DataFrame if successful otherwise None\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ListExtract:\n \"\"\"Used to extract a list of dictionaries that will each represent a single row in a database\"\"\"\n\n def __init__(self, lam: Callable[[dict], list]):\n \"\"\"Creates a ListExtract object Args: lam: Anonymous function that attempts to extract a list of data that will become rows in a DataFrame\"\"\"\n self.ctype = 'list_extract'\n 'str: type of data processor'\n self.lam = lam\n self.dtype = list\n 'type(list): the datatype performing `lam` should yield'\n\n def lam_wrap(self, lam_arg: dict) -> Union[List[dict], None]:\n \"\"\"Overrides super class method Workflow: 1. Attempt to perform the ``lam`` operation on the incoming data 2. Attempt to cast the result ``lam`` operation to a list * If an exception occurs, return None 3. Return the list of data Args: lam_arg: A dictionary containing a list of dictionaries that will become the rows of a DataFrame Returns: A list of dictionaries that will become the rows of a DataFrame if successful otherwise None\"\"\"\n try:\n res = self.dtype(self.lam(lam_arg))\n except Exception as e:\n logger = get_logger()\n logger.exception(e)\n res = None\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "src/api2db/ingest/pre_process/list_extract.py", "source_repo": "TristenHarr/api2db", "split": "test", "star_events_count": 46} {"blob_id": "06da95239ad60e13a8ef923d19cd2e39ef28856c", "bodies": ["logger.info('Processing Genome')\nif configuration is None:\n configuration = {}\nself.configuration.update(configuration)", "output_files_generated = {}\noutput_metadata = {}\ngenome_input_file = {'genome': input_files['genome']}\ngenome_input_meta = {'genome': metadata['genome']}\nif 'genome_public' in input_files:\n genome_input_file = {'genome': input_files['genome_public']}\n genome_input_meta = {'genome': metadata['genome_public']}\nlogger.info('Generating BSgenome')\nbsg = bsgenomeTool(self.configuration)\nlogger.progress('BSgenome Indexer', status='RUNNING')\nbsgi, bsgm = bsg.run(genome_input_file, genome_input_meta, output_files)\nlogger.progress('BSgenome Indexer', status='DONE')\ntry:\n for file_key in ['bsgenome', 'chrom_size', 'genome_2bit', 'seed_file']:\n output_files_generated[file_key] = bsgi[file_key]\n output_metadata[file_key] = bsgm[file_key]\n tool_name = output_metadata[file_key].meta_data['tool']\n output_metadata[file_key].meta_data['tool_description'] = tool_name\n output_metadata[file_key].meta_data['tool'] = 'process_bsgenome'\nexcept KeyError:\n logger.fatal('BSgenome indexer failed')\nreturn (output_files_generated, output_metadata)"], "bodies_text": "<|body_start_0|>\n logger.info('Processing Genome')\n if configuration is None:\n configuration = {}\n self.configuration.update(configuration)\n<|end_body_0|>\n\n<|body_start_1|>\n output_files_generated = {}\n output_metadata = {}\n genome_input_file = {'genome': input_files['genome']}\n genome_input_meta = {'genome': metadata['genome']}\n if 'genome_public' in input_files:\n genome_input_file = {'genome': input_files['genome_public']}\n genome_input_meta = {'genome': metadata['genome_public']}\n logger.info('Generating BSgenome')\n bsg = bsgenomeTool(self.configuration)\n logger.progress('BSgenome Indexer', status='RUNNING')\n bsgi, bsgm = bsg.run(genome_input_file, genome_input_meta, output_files)\n logger.progress('BSgenome Indexer', status='DONE')\n try:\n for file_key in ['bsgenome', 'chrom_size', 'genome_2bit', 'seed_file']:\n output_files_generated[file_key] = bsgi[file_key]\n output_metadata[file_key] = bsgm[file_key]\n tool_name = output_metadata[file_key].meta_data['tool']\n output_metadata[file_key].meta_data['tool_description'] = tool_name\n output_metadata[file_key].meta_data['tool'] = 'process_bsgenome'\n except KeyError:\n logger.fatal('BSgenome indexer failed')\n return (output_files_generated, output_metadata)\n<|end_body_1|>\n", "class_docstring": "Workflow to download and pre-index a given genome", "class_name": "process_bsgenome", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass process_bsgenome:\n \"\"\"Workflow to download and pre-index a given genome\"\"\"\n\n def __init__(self, configuration=None):\n \"\"\"Initialise the class Parameters ---------- configuration : dict a dictionary containing parameters that define how the operation should be carried out, which are specific to each Tool.\"\"\"\n <|body_0|>\n\n def run(self, input_files, metadata, output_files):\n \"\"\"Main run function for the indexing of genome assembly FASTA files. The pipeline uses Bowtie2, BWA and GEM ready for use in pipelines that rely on alignment. Parameters ---------- input_files : dict genome : str Location of the FASTA input file metadata : dict genome : dict Required meta data output_files : dict BSgenome : str Location of a the BSgenome R package Returns ------- outputfiles : dict List of locations for the output index files output_metadata : dict Metadata about each of the files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.info('Processing Genome')\n if configuration is None:\n configuration = {}\n self.configuration.update(configuration)\n<|end_body_0|>\n\n<|body_start_1|>\n output_files_generated = {}\n output_metadata = {}\n genome_input_file = {'genome': input_files['genome']}\n genome_input_meta = {'genome': metadata['genome']}\n if 'genome_public' in input_files:\n genome_input_file = {'genome': input_files['genome_public']}\n genome_input_meta = {'genome': metadata['genome_public']}\n logger.info('Generating BSgenome')\n bsg = bsgenomeTool(self.configuration)\n logger.progress('BSgenome Indexer', status='RUNNING')\n bsgi, bsgm = bsg.run(genome_input_file, genome_input_meta, output_files)\n logger.progress('BSgenome Indexer', status='DONE')\n try:\n for file_key in ['bsgenome', 'chrom_size', 'genome_2bit', 'seed_file']:\n output_files_generated[file_key] = bsgi[file_key]\n output_metadata[file_key] = bsgm[file_key]\n tool_name = output_metadata[file_key].meta_data['tool']\n output_metadata[file_key].meta_data['tool_description'] = tool_name\n output_metadata[file_key].meta_data['tool'] = 'process_bsgenome'\n except KeyError:\n logger.fatal('BSgenome indexer failed')\n return (output_files_generated, output_metadata)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000326", "length_bytes": 5291, "license_type": "permissive", "methods": [{"docstring": "Initialise the class Parameters ---------- configuration : dict a dictionary containing parameters that define how the operation should be carried out, which are specific to each Tool.", "name": "__init__", "signature": "def __init__(self, configuration=None)"}, {"docstring": "Main run function for the indexing of genome assembly FASTA files. The pipeline uses Bowtie2, BWA and GEM ready for use in pipelines that rely on alignment. Parameters ---------- input_files : dict genome : str Location of the FASTA input file metadata : dict genome : dict Required meta data output_files : dict BSgenome : str Location of a the BSgenome R package Returns ------- outputfiles : dict List of locations for the output index files output_metadata : dict Metadata about each of the files", "name": "run", "signature": "def run(self, input_files, metadata, output_files)"}], "n_methods": 2, "prompt": "Implement the Python class `process_bsgenome` described below.\n\nClass description:\nWorkflow to download and pre-index a given genome\n\nMethod signatures and docstrings:\n- def __init__(self, configuration=None): Initialise the class Parameters ---------- configuration : dict a dictionary containing parameters that define how the operation should be carried out, which are specific to each Tool.\n- def run(self, input_files, metadata, output_files): Main run function for the indexing of genome assembly FASTA files. The pipeline uses Bowtie2, BWA and GEM ready for use in pipelines that rely on alignment. Parameters ---------- input_files : dict genome : str Location of the FASTA input file metadata : dict genome : dict Required meta data output_files : dict BSgenome : str Location of a the BSgenome R package Returns ------- outputfiles : dict List of locations for the output index files output_metadata : dict Metadata about each of the files", "prompted_full_text": "Implement the Python class `process_bsgenome` described below.\n\nClass description:\nWorkflow to download and pre-index a given genome\n\nMethod signatures and docstrings:\n- def __init__(self, configuration=None): Initialise the class Parameters ---------- configuration : dict a dictionary containing parameters that define how the operation should be carried out, which are specific to each Tool.\n- def run(self, input_files, metadata, output_files): Main run function for the indexing of genome assembly FASTA files. The pipeline uses Bowtie2, BWA and GEM ready for use in pipelines that rely on alignment. Parameters ---------- input_files : dict genome : str Location of the FASTA input file metadata : dict genome : dict Required meta data output_files : dict BSgenome : str Location of a the BSgenome R package Returns ------- outputfiles : dict List of locations for the output index files output_metadata : dict Metadata about each of the files\n\n<|skeleton|>\nclass process_bsgenome:\n \"\"\"Workflow to download and pre-index a given genome\"\"\"\n\n def __init__(self, configuration=None):\n \"\"\"Initialise the class Parameters ---------- configuration : dict a dictionary containing parameters that define how the operation should be carried out, which are specific to each Tool.\"\"\"\n <|body_0|>\n\n def run(self, input_files, metadata, output_files):\n \"\"\"Main run function for the indexing of genome assembly FASTA files. The pipeline uses Bowtie2, BWA and GEM ready for use in pipelines that rely on alignment. Parameters ---------- input_files : dict genome : str Location of the FASTA input file metadata : dict genome : dict Required meta data output_files : dict BSgenome : str Location of a the BSgenome R package Returns ------- outputfiles : dict List of locations for the output index files output_metadata : dict Metadata about each of the files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.info('Processing Genome')\n if configuration is None:\n configuration = {}\n self.configuration.update(configuration)\n<|end_body_0|>\n\n<|body_start_1|>\n output_files_generated = {}\n output_metadata = {}\n genome_input_file = {'genome': input_files['genome']}\n genome_input_meta = {'genome': metadata['genome']}\n if 'genome_public' in input_files:\n genome_input_file = {'genome': input_files['genome_public']}\n genome_input_meta = {'genome': metadata['genome_public']}\n logger.info('Generating BSgenome')\n bsg = bsgenomeTool(self.configuration)\n logger.progress('BSgenome Indexer', status='RUNNING')\n bsgi, bsgm = bsg.run(genome_input_file, genome_input_meta, output_files)\n logger.progress('BSgenome Indexer', status='DONE')\n try:\n for file_key in ['bsgenome', 'chrom_size', 'genome_2bit', 'seed_file']:\n output_files_generated[file_key] = bsgi[file_key]\n output_metadata[file_key] = bsgm[file_key]\n tool_name = output_metadata[file_key].meta_data['tool']\n output_metadata[file_key].meta_data['tool_description'] = tool_name\n output_metadata[file_key].meta_data['tool'] = 'process_bsgenome'\n except KeyError:\n logger.fatal('BSgenome indexer failed')\n return (output_files_generated, output_metadata)\n<|end_body_1|>\n", "revision_id": "50c7115c0c1a6af48dc34f275e469d1b9eb02999", "skeleton": "<|skeleton|>\nclass process_bsgenome:\n \"\"\"Workflow to download and pre-index a given genome\"\"\"\n\n def __init__(self, configuration=None):\n \"\"\"Initialise the class Parameters ---------- configuration : dict a dictionary containing parameters that define how the operation should be carried out, which are specific to each Tool.\"\"\"\n <|body_0|>\n\n def run(self, input_files, metadata, output_files):\n \"\"\"Main run function for the indexing of genome assembly FASTA files. The pipeline uses Bowtie2, BWA and GEM ready for use in pipelines that rely on alignment. Parameters ---------- input_files : dict genome : str Location of the FASTA input file metadata : dict genome : dict Required meta data output_files : dict BSgenome : str Location of a the BSgenome R package Returns ------- outputfiles : dict List of locations for the output index files output_metadata : dict Metadata about each of the files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class process_bsgenome:\n \"\"\"Workflow to download and pre-index a given genome\"\"\"\n\n def __init__(self, configuration=None):\n \"\"\"Initialise the class Parameters ---------- configuration : dict a dictionary containing parameters that define how the operation should be carried out, which are specific to each Tool.\"\"\"\n logger.info('Processing Genome')\n if configuration is None:\n configuration = {}\n self.configuration.update(configuration)\n\n def run(self, input_files, metadata, output_files):\n \"\"\"Main run function for the indexing of genome assembly FASTA files. The pipeline uses Bowtie2, BWA and GEM ready for use in pipelines that rely on alignment. Parameters ---------- input_files : dict genome : str Location of the FASTA input file metadata : dict genome : dict Required meta data output_files : dict BSgenome : str Location of a the BSgenome R package Returns ------- outputfiles : dict List of locations for the output index files output_metadata : dict Metadata about each of the files\"\"\"\n output_files_generated = {}\n output_metadata = {}\n genome_input_file = {'genome': input_files['genome']}\n genome_input_meta = {'genome': metadata['genome']}\n if 'genome_public' in input_files:\n genome_input_file = {'genome': input_files['genome_public']}\n genome_input_meta = {'genome': metadata['genome_public']}\n logger.info('Generating BSgenome')\n bsg = bsgenomeTool(self.configuration)\n logger.progress('BSgenome Indexer', status='RUNNING')\n bsgi, bsgm = bsg.run(genome_input_file, genome_input_meta, output_files)\n logger.progress('BSgenome Indexer', status='DONE')\n try:\n for file_key in ['bsgenome', 'chrom_size', 'genome_2bit', 'seed_file']:\n output_files_generated[file_key] = bsgi[file_key]\n output_metadata[file_key] = bsgm[file_key]\n tool_name = output_metadata[file_key].meta_data['tool']\n output_metadata[file_key].meta_data['tool_description'] = tool_name\n output_metadata[file_key].meta_data['tool'] = 'process_bsgenome'\n except KeyError:\n logger.fatal('BSgenome indexer failed')\n return (output_files_generated, output_metadata)\n", "source": "the_stack_v2_python_sparse", "source_path": "process_bsgenome.py", "source_repo": "Multiscale-Genomics/mg-process-fastq", "split": "test", "star_events_count": 2} {"blob_id": "9a6a4715c618d8ca04ed8790b77e3ca623c601de", "bodies": ["self.cause_var = cause_var\nself.effect_var = effect_var\nself.cause_states = cause_states\nself.effect_states = effect_states\nself.cause_cprob = cause_cprob\nself.effect_cprob = effect_cprob\nself.grid = grid", "g = self.grid\nquery_nodes = []\nfor vname in disc_bn.V:\n query_name = \"'\" + self.effect_var + \"'\"\n if vname[:len(query_name)] == query_name:\n query_nodes.append(vname)\nnodes_in_grid = [[[] for x in np.arange(g.tx, g.tx + g.width, g.dx)] for y in np.arange(g.ty, g.ty + g.height, g.dy)]\nfor qn in query_nodes:\n x, y = vertex_locations[qn]\n i = int((y - g.ty) / g.dy)\n j = int((x - g.tx) / g.dx)\n if len(nodes_in_grid) <= i or i < 0 or len(nodes_in_grid[0]) <= j or (j < 0):\n continue\n nodes_in_grid[i][j].append(qn)\nfor i, row in enumerate(nodes_in_grid):\n for j, cell in enumerate(row):\n parent_location = [g.tx + (j + 0.5) * g.dx, g.ty + (i + 0.5) * g.dy]\n parent_name = \"'\" + self.cause_var + \"' \" + str(parent_location)\n if parent_name in disc_bn.V:\n continue\n disc_bn.add_vertex(parent_name, self.cause_states)\n disc_bn.set_cprob(parent_name, self.cause_cprob)\n vertex_locations[parent_name] = parent_location\n for child_node in cell:\n disc_bn.add_edge([parent_name, child_node])\n disc_bn.set_cprob(child_node, self.effect_cprob)"], "bodies_text": "<|body_start_0|>\n self.cause_var = cause_var\n self.effect_var = effect_var\n self.cause_states = cause_states\n self.effect_states = effect_states\n self.cause_cprob = cause_cprob\n self.effect_cprob = effect_cprob\n self.grid = grid\n<|end_body_0|>\n\n<|body_start_1|>\n g = self.grid\n query_nodes = []\n for vname in disc_bn.V:\n query_name = \"'\" + self.effect_var + \"'\"\n if vname[:len(query_name)] == query_name:\n query_nodes.append(vname)\n nodes_in_grid = [[[] for x in np.arange(g.tx, g.tx + g.width, g.dx)] for y in np.arange(g.ty, g.ty + g.height, g.dy)]\n for qn in query_nodes:\n x, y = vertex_locations[qn]\n i = int((y - g.ty) / g.dy)\n j = int((x - g.tx) / g.dx)\n if len(nodes_in_grid) <= i or i < 0 or len(nodes_in_grid[0]) <= j or (j < 0):\n continue\n nodes_in_grid[i][j].append(qn)\n for i, row in enumerate(nodes_in_grid):\n for j, cell in enumerate(row):\n parent_location = [g.tx + (j + 0.5) * g.dx, g.ty + (i + 0.5) * g.dy]\n parent_name = \"'\" + self.cause_var + \"' \" + str(parent_location)\n if parent_name in disc_bn.V:\n continue\n disc_bn.add_vertex(parent_name, self.cause_states)\n disc_bn.set_cprob(parent_name, self.cause_cprob)\n vertex_locations[parent_name] = parent_location\n for child_node in cell:\n disc_bn.add_edge([parent_name, child_node])\n disc_bn.set_cprob(child_node, self.effect_cprob)\n<|end_body_1|>\n", "class_docstring": "This type or rule creates a grid and group by the center of the cell.", "class_name": "GridRule", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GridRule:\n \"\"\"This type or rule creates a grid and group by the center of the cell.\"\"\"\n\n def __init__(self, cause_var, effect_var, cause_states, effect_states, cause_cprob, effect_cprob, grid):\n \"\"\"Construct. :param cause_var: cause variable or parent (str) :param effect_var: effect variable (str) ----> TRIGGER. :param cause_states: possible states of the cause variable (str[]) :param effect_states: possible states of the effect variable (str[]) :param cause_cprob: Conditional Probability Table for the cause :param effect_cprob: Conditional Probability Table for the effect :param grid: grid representation\"\"\"\n <|body_0|>\n\n def generate_inference(self, disc_bn, evidences, bn_evidences, vertex_locations):\n \"\"\":param disc_bn: :param evidences: :param bn_evidences: :param vertex_locations:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cause_var = cause_var\n self.effect_var = effect_var\n self.cause_states = cause_states\n self.effect_states = effect_states\n self.cause_cprob = cause_cprob\n self.effect_cprob = effect_cprob\n self.grid = grid\n<|end_body_0|>\n\n<|body_start_1|>\n g = self.grid\n query_nodes = []\n for vname in disc_bn.V:\n query_name = \"'\" + self.effect_var + \"'\"\n if vname[:len(query_name)] == query_name:\n query_nodes.append(vname)\n nodes_in_grid = [[[] for x in np.arange(g.tx, g.tx + g.width, g.dx)] for y in np.arange(g.ty, g.ty + g.height, g.dy)]\n for qn in query_nodes:\n x, y = vertex_locations[qn]\n i = int((y - g.ty) / g.dy)\n j = int((x - g.tx) / g.dx)\n if len(nodes_in_grid) <= i or i < 0 or len(nodes_in_grid[0]) <= j or (j < 0):\n continue\n nodes_in_grid[i][j].append(qn)\n for i, row in enumerate(nodes_in_grid):\n for j, cell in enumerate(row):\n parent_location = [g.tx + (j + 0.5) * g.dx, g.ty + (i + 0.5) * g.dy]\n parent_name = \"'\" + self.cause_var + \"' \" + str(parent_location)\n if parent_name in disc_bn.V:\n continue\n disc_bn.add_vertex(parent_name, self.cause_states)\n disc_bn.set_cprob(parent_name, self.cause_cprob)\n vertex_locations[parent_name] = parent_location\n for child_node in cell:\n disc_bn.add_edge([parent_name, child_node])\n disc_bn.set_cprob(child_node, self.effect_cprob)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000327", "length_bytes": 3558, "license_type": "no_license", "methods": [{"docstring": "Construct. :param cause_var: cause variable or parent (str) :param effect_var: effect variable (str) ----> TRIGGER. :param cause_states: possible states of the cause variable (str[]) :param effect_states: possible states of the effect variable (str[]) :param cause_cprob: Conditional Probability Table for the cause :param effect_cprob: Conditional Probability Table for the effect :param grid: grid representation", "name": "__init__", "signature": "def __init__(self, cause_var, effect_var, cause_states, effect_states, cause_cprob, effect_cprob, grid)"}, {"docstring": ":param disc_bn: :param evidences: :param bn_evidences: :param vertex_locations:", "name": "generate_inference", "signature": "def generate_inference(self, disc_bn, evidences, bn_evidences, vertex_locations)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002658", "prompt": "Implement the Python class `GridRule` described below.\n\nClass description:\nThis type or rule creates a grid and group by the center of the cell.\n\nMethod signatures and docstrings:\n- def __init__(self, cause_var, effect_var, cause_states, effect_states, cause_cprob, effect_cprob, grid): Construct. :param cause_var: cause variable or parent (str) :param effect_var: effect variable (str) ----> TRIGGER. :param cause_states: possible states of the cause variable (str[]) :param effect_states: possible states of the effect variable (str[]) :param cause_cprob: Conditional Probability Table for the cause :param effect_cprob: Conditional Probability Table for the effect :param grid: grid representation\n- def generate_inference(self, disc_bn, evidences, bn_evidences, vertex_locations): :param disc_bn: :param evidences: :param bn_evidences: :param vertex_locations:", "prompted_full_text": "Implement the Python class `GridRule` described below.\n\nClass description:\nThis type or rule creates a grid and group by the center of the cell.\n\nMethod signatures and docstrings:\n- def __init__(self, cause_var, effect_var, cause_states, effect_states, cause_cprob, effect_cprob, grid): Construct. :param cause_var: cause variable or parent (str) :param effect_var: effect variable (str) ----> TRIGGER. :param cause_states: possible states of the cause variable (str[]) :param effect_states: possible states of the effect variable (str[]) :param cause_cprob: Conditional Probability Table for the cause :param effect_cprob: Conditional Probability Table for the effect :param grid: grid representation\n- def generate_inference(self, disc_bn, evidences, bn_evidences, vertex_locations): :param disc_bn: :param evidences: :param bn_evidences: :param vertex_locations:\n\n<|skeleton|>\nclass GridRule:\n \"\"\"This type or rule creates a grid and group by the center of the cell.\"\"\"\n\n def __init__(self, cause_var, effect_var, cause_states, effect_states, cause_cprob, effect_cprob, grid):\n \"\"\"Construct. :param cause_var: cause variable or parent (str) :param effect_var: effect variable (str) ----> TRIGGER. :param cause_states: possible states of the cause variable (str[]) :param effect_states: possible states of the effect variable (str[]) :param cause_cprob: Conditional Probability Table for the cause :param effect_cprob: Conditional Probability Table for the effect :param grid: grid representation\"\"\"\n <|body_0|>\n\n def generate_inference(self, disc_bn, evidences, bn_evidences, vertex_locations):\n \"\"\":param disc_bn: :param evidences: :param bn_evidences: :param vertex_locations:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cause_var = cause_var\n self.effect_var = effect_var\n self.cause_states = cause_states\n self.effect_states = effect_states\n self.cause_cprob = cause_cprob\n self.effect_cprob = effect_cprob\n self.grid = grid\n<|end_body_0|>\n\n<|body_start_1|>\n g = self.grid\n query_nodes = []\n for vname in disc_bn.V:\n query_name = \"'\" + self.effect_var + \"'\"\n if vname[:len(query_name)] == query_name:\n query_nodes.append(vname)\n nodes_in_grid = [[[] for x in np.arange(g.tx, g.tx + g.width, g.dx)] for y in np.arange(g.ty, g.ty + g.height, g.dy)]\n for qn in query_nodes:\n x, y = vertex_locations[qn]\n i = int((y - g.ty) / g.dy)\n j = int((x - g.tx) / g.dx)\n if len(nodes_in_grid) <= i or i < 0 or len(nodes_in_grid[0]) <= j or (j < 0):\n continue\n nodes_in_grid[i][j].append(qn)\n for i, row in enumerate(nodes_in_grid):\n for j, cell in enumerate(row):\n parent_location = [g.tx + (j + 0.5) * g.dx, g.ty + (i + 0.5) * g.dy]\n parent_name = \"'\" + self.cause_var + \"' \" + str(parent_location)\n if parent_name in disc_bn.V:\n continue\n disc_bn.add_vertex(parent_name, self.cause_states)\n disc_bn.set_cprob(parent_name, self.cause_cprob)\n vertex_locations[parent_name] = parent_location\n for child_node in cell:\n disc_bn.add_edge([parent_name, child_node])\n disc_bn.set_cprob(child_node, self.effect_cprob)\n<|end_body_1|>\n", "revision_id": "961a7102f0be273bb962eba211a7144535753c2d", "skeleton": "<|skeleton|>\nclass GridRule:\n \"\"\"This type or rule creates a grid and group by the center of the cell.\"\"\"\n\n def __init__(self, cause_var, effect_var, cause_states, effect_states, cause_cprob, effect_cprob, grid):\n \"\"\"Construct. :param cause_var: cause variable or parent (str) :param effect_var: effect variable (str) ----> TRIGGER. :param cause_states: possible states of the cause variable (str[]) :param effect_states: possible states of the effect variable (str[]) :param cause_cprob: Conditional Probability Table for the cause :param effect_cprob: Conditional Probability Table for the effect :param grid: grid representation\"\"\"\n <|body_0|>\n\n def generate_inference(self, disc_bn, evidences, bn_evidences, vertex_locations):\n \"\"\":param disc_bn: :param evidences: :param bn_evidences: :param vertex_locations:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GridRule:\n \"\"\"This type or rule creates a grid and group by the center of the cell.\"\"\"\n\n def __init__(self, cause_var, effect_var, cause_states, effect_states, cause_cprob, effect_cprob, grid):\n \"\"\"Construct. :param cause_var: cause variable or parent (str) :param effect_var: effect variable (str) ----> TRIGGER. :param cause_states: possible states of the cause variable (str[]) :param effect_states: possible states of the effect variable (str[]) :param cause_cprob: Conditional Probability Table for the cause :param effect_cprob: Conditional Probability Table for the effect :param grid: grid representation\"\"\"\n self.cause_var = cause_var\n self.effect_var = effect_var\n self.cause_states = cause_states\n self.effect_states = effect_states\n self.cause_cprob = cause_cprob\n self.effect_cprob = effect_cprob\n self.grid = grid\n\n def generate_inference(self, disc_bn, evidences, bn_evidences, vertex_locations):\n \"\"\":param disc_bn: :param evidences: :param bn_evidences: :param vertex_locations:\"\"\"\n g = self.grid\n query_nodes = []\n for vname in disc_bn.V:\n query_name = \"'\" + self.effect_var + \"'\"\n if vname[:len(query_name)] == query_name:\n query_nodes.append(vname)\n nodes_in_grid = [[[] for x in np.arange(g.tx, g.tx + g.width, g.dx)] for y in np.arange(g.ty, g.ty + g.height, g.dy)]\n for qn in query_nodes:\n x, y = vertex_locations[qn]\n i = int((y - g.ty) / g.dy)\n j = int((x - g.tx) / g.dx)\n if len(nodes_in_grid) <= i or i < 0 or len(nodes_in_grid[0]) <= j or (j < 0):\n continue\n nodes_in_grid[i][j].append(qn)\n for i, row in enumerate(nodes_in_grid):\n for j, cell in enumerate(row):\n parent_location = [g.tx + (j + 0.5) * g.dx, g.ty + (i + 0.5) * g.dy]\n parent_name = \"'\" + self.cause_var + \"' \" + str(parent_location)\n if parent_name in disc_bn.V:\n continue\n disc_bn.add_vertex(parent_name, self.cause_states)\n disc_bn.set_cprob(parent_name, self.cause_cprob)\n vertex_locations[parent_name] = parent_location\n for child_node in cell:\n disc_bn.add_edge([parent_name, child_node])\n disc_bn.set_cprob(child_node, self.effect_cprob)\n", "source": "the_stack_v2_python_sparse", "source_path": "inference-engine/infengine/rules/GridRule.py", "source_repo": "verlab/perceptive-turtles", "split": "test", "star_events_count": 0} {"blob_id": "226826158e2f6b8d9717a4ca44c6bc8690282af4", "bodies": ["try:\n self.request_control = request.RequestController(endopoint=accounting_endpoint)\nexcept Exception as e:\n raise exceptions.ConfigurationException('Accounting server configuration failed %s. ' % e.message)", "path = '/set_accounting'\nparameters = {'admin_token': admin_token, 'accounting': accounting_info}\nout = self.request_control.execute_put(path=path, parameters=parameters)\nexceptions.make_log('info', 'ACCOUNTING DONE')\nreturn out"], "bodies_text": "<|body_start_0|>\n try:\n self.request_control = request.RequestController(endopoint=accounting_endpoint)\n except Exception as e:\n raise exceptions.ConfigurationException('Accounting server configuration failed %s. ' % e.message)\n<|end_body_0|>\n\n<|body_start_1|>\n path = '/set_accounting'\n parameters = {'admin_token': admin_token, 'accounting': accounting_info}\n out = self.request_control.execute_put(path=path, parameters=parameters)\n exceptions.make_log('info', 'ACCOUNTING DONE')\n return out\n<|end_body_1|>\n", "class_docstring": "Notification controller for batch systems.", "class_name": "BatchNotificationController", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BatchNotificationController:\n \"\"\"Notification controller for batch systems.\"\"\"\n\n def __init__(self, accounting_endpoint):\n \"\"\"Initialize the controller It set attributes and creates a Request controller by using the endpoint related to the accounting server. :param accounting_endpoint: :return:\"\"\"\n <|body_0|>\n\n def notify_accounting(self, admin_token, accounting_info):\n \"\"\"Execute a PUT request to notify the accounting In order to notify the job accounting information, it executes a PUT request to the accounting server :param admin_token: administration token. :param accounting_info: string within the accounting information :return: response output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.request_control = request.RequestController(endopoint=accounting_endpoint)\n except Exception as e:\n raise exceptions.ConfigurationException('Accounting server configuration failed %s. ' % e.message)\n<|end_body_0|>\n\n<|body_start_1|>\n path = '/set_accounting'\n parameters = {'admin_token': admin_token, 'accounting': accounting_info}\n out = self.request_control.execute_put(path=path, parameters=parameters)\n exceptions.make_log('info', 'ACCOUNTING DONE')\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000328", "length_bytes": 29683, "license_type": "permissive", "methods": [{"docstring": "Initialize the controller It set attributes and creates a Request controller by using the endpoint related to the accounting server. :param accounting_endpoint: :return:", "name": "__init__", "signature": "def __init__(self, accounting_endpoint)"}, {"docstring": "Execute a PUT request to notify the accounting In order to notify the job accounting information, it executes a PUT request to the accounting server :param admin_token: administration token. :param accounting_info: string within the accounting information :return: response output", "name": "notify_accounting", "signature": "def notify_accounting(self, admin_token, accounting_info)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_048944", "prompt": "Implement the Python class `BatchNotificationController` described below.\n\nClass description:\nNotification controller for batch systems.\n\nMethod signatures and docstrings:\n- def __init__(self, accounting_endpoint): Initialize the controller It set attributes and creates a Request controller by using the endpoint related to the accounting server. :param accounting_endpoint: :return:\n- def notify_accounting(self, admin_token, accounting_info): Execute a PUT request to notify the accounting In order to notify the job accounting information, it executes a PUT request to the accounting server :param admin_token: administration token. :param accounting_info: string within the accounting information :return: response output", "prompted_full_text": "Implement the Python class `BatchNotificationController` described below.\n\nClass description:\nNotification controller for batch systems.\n\nMethod signatures and docstrings:\n- def __init__(self, accounting_endpoint): Initialize the controller It set attributes and creates a Request controller by using the endpoint related to the accounting server. :param accounting_endpoint: :return:\n- def notify_accounting(self, admin_token, accounting_info): Execute a PUT request to notify the accounting In order to notify the job accounting information, it executes a PUT request to the accounting server :param admin_token: administration token. :param accounting_info: string within the accounting information :return: response output\n\n<|skeleton|>\nclass BatchNotificationController:\n \"\"\"Notification controller for batch systems.\"\"\"\n\n def __init__(self, accounting_endpoint):\n \"\"\"Initialize the controller It set attributes and creates a Request controller by using the endpoint related to the accounting server. :param accounting_endpoint: :return:\"\"\"\n <|body_0|>\n\n def notify_accounting(self, admin_token, accounting_info):\n \"\"\"Execute a PUT request to notify the accounting In order to notify the job accounting information, it executes a PUT request to the accounting server :param admin_token: administration token. :param accounting_info: string within the accounting information :return: response output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.request_control = request.RequestController(endopoint=accounting_endpoint)\n except Exception as e:\n raise exceptions.ConfigurationException('Accounting server configuration failed %s. ' % e.message)\n<|end_body_0|>\n\n<|body_start_1|>\n path = '/set_accounting'\n parameters = {'admin_token': admin_token, 'accounting': accounting_info}\n out = self.request_control.execute_put(path=path, parameters=parameters)\n exceptions.make_log('info', 'ACCOUNTING DONE')\n return out\n<|end_body_1|>\n", "revision_id": "346f5bdd7a1ff6c705c30172661a93540d9f0985", "skeleton": "<|skeleton|>\nclass BatchNotificationController:\n \"\"\"Notification controller for batch systems.\"\"\"\n\n def __init__(self, accounting_endpoint):\n \"\"\"Initialize the controller It set attributes and creates a Request controller by using the endpoint related to the accounting server. :param accounting_endpoint: :return:\"\"\"\n <|body_0|>\n\n def notify_accounting(self, admin_token, accounting_info):\n \"\"\"Execute a PUT request to notify the accounting In order to notify the job accounting information, it executes a PUT request to the accounting server :param admin_token: administration token. :param accounting_info: string within the accounting information :return: response output\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BatchNotificationController:\n \"\"\"Notification controller for batch systems.\"\"\"\n\n def __init__(self, accounting_endpoint):\n \"\"\"Initialize the controller It set attributes and creates a Request controller by using the endpoint related to the accounting server. :param accounting_endpoint: :return:\"\"\"\n try:\n self.request_control = request.RequestController(endopoint=accounting_endpoint)\n except Exception as e:\n raise exceptions.ConfigurationException('Accounting server configuration failed %s. ' % e.message)\n\n def notify_accounting(self, admin_token, accounting_info):\n \"\"\"Execute a PUT request to notify the accounting In order to notify the job accounting information, it executes a PUT request to the accounting server :param admin_token: administration token. :param accounting_info: string within the accounting information :return: response output\"\"\"\n path = '/set_accounting'\n parameters = {'admin_token': admin_token, 'accounting': accounting_info}\n out = self.request_control.execute_put(path=path, parameters=parameters)\n exceptions.make_log('info', 'ACCOUNTING DONE')\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "bdocker/modules/batch.py", "source_repo": "indigo-dc/bdocker", "split": "test", "star_events_count": 4} {"blob_id": "0d662da1db5b332df14ebb91955b4649efc030bf", "bodies": ["self.header = HEADER\nself.session = session\nimgurl_pattern_compile = re.compile('.*?\n self.header = HEADER\n self.session = session\n imgurl_pattern_compile = re.compile('.*?\n\n<|body_start_1|>\n image = Image.open('data/crack_code.jpeg')\n image = image.convert('L')\n threshold = 127\n table = []\n for i in range(256):\n if i < threshold:\n table.append(0)\n else:\n table.append(1)\n iamge = image.point(table, '1')\n<|end_body_1|>\n\n<|body_start_2|>\n image = Image.open('crack_code.jpeg')\n image.show()\n code = input('出现验证码,请手动输入:')\n return self.send_code(code)\n<|end_body_2|>\n\n<|body_start_3|>\n re_url = quote_plus(self.re_current_url)\n re_url = re.sub('%2F', '%2f', re_url)\n re_url = re.sub('%3F', '%3f', re_url)\n re_url = re.sub('%3D', '%3d', re_url)\n send_url = 'http://kns.cnki.net/kns/brief/vericode.aspx?rurl=' + re_url + '&vericode=' + code\n self.header['Referer'] = send_url\n self.header['Upgrade-Insecure-Requests'] = '1'\n return self.session.get(send_url, headers=self.header).text\n<|end_body_3|>\n", "class_docstring": "", "class_name": "CrackCode", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CrackCode:\n\n def get_image(self, current_url, session, page_source):\n \"\"\"获取验证码图片\"\"\"\n <|body_0|>\n\n def crack_code(self):\n \"\"\"自动识别验证码\"\"\"\n <|body_1|>\n\n def handle_code(self):\n \"\"\"手动识别验证码\"\"\"\n <|body_2|>\n\n def send_code(self, code):\n \"\"\"发送验证码\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.header = HEADER\n self.session = session\n imgurl_pattern_compile = re.compile('.*?\n\n<|body_start_1|>\n image = Image.open('data/crack_code.jpeg')\n image = image.convert('L')\n threshold = 127\n table = []\n for i in range(256):\n if i < threshold:\n table.append(0)\n else:\n table.append(1)\n iamge = image.point(table, '1')\n<|end_body_1|>\n\n<|body_start_2|>\n image = Image.open('crack_code.jpeg')\n image.show()\n code = input('出现验证码,请手动输入:')\n return self.send_code(code)\n<|end_body_2|>\n\n<|body_start_3|>\n re_url = quote_plus(self.re_current_url)\n re_url = re.sub('%2F', '%2f', re_url)\n re_url = re.sub('%3F', '%3f', re_url)\n re_url = re.sub('%3D', '%3d', re_url)\n send_url = 'http://kns.cnki.net/kns/brief/vericode.aspx?rurl=' + re_url + '&vericode=' + code\n self.header['Referer'] = send_url\n self.header['Upgrade-Insecure-Requests'] = '1'\n return self.session.get(send_url, headers=self.header).text\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000329", "length_bytes": 2846, "license_type": "permissive", "methods": [{"docstring": "获取验证码图片", "name": "get_image", "signature": "def get_image(self, current_url, session, page_source)"}, {"docstring": "自动识别验证码", "name": "crack_code", "signature": "def crack_code(self)"}, {"docstring": "手动识别验证码", "name": "handle_code", "signature": "def handle_code(self)"}, {"docstring": "发送验证码", "name": "send_code", "signature": "def send_code(self, code)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_052296", "prompt": "Implement the Python class `CrackCode` described below.\n\nClass description:\nImplement the CrackCode class.\n\nMethod signatures and docstrings:\n- def get_image(self, current_url, session, page_source): 获取验证码图片\n- def crack_code(self): 自动识别验证码\n- def handle_code(self): 手动识别验证码\n- def send_code(self, code): 发送验证码", "prompted_full_text": "Implement the Python class `CrackCode` described below.\n\nClass description:\nImplement the CrackCode class.\n\nMethod signatures and docstrings:\n- def get_image(self, current_url, session, page_source): 获取验证码图片\n- def crack_code(self): 自动识别验证码\n- def handle_code(self): 手动识别验证码\n- def send_code(self, code): 发送验证码\n\n<|skeleton|>\nclass CrackCode:\n\n def get_image(self, current_url, session, page_source):\n \"\"\"获取验证码图片\"\"\"\n <|body_0|>\n\n def crack_code(self):\n \"\"\"自动识别验证码\"\"\"\n <|body_1|>\n\n def handle_code(self):\n \"\"\"手动识别验证码\"\"\"\n <|body_2|>\n\n def send_code(self, code):\n \"\"\"发送验证码\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.header = HEADER\n self.session = session\n imgurl_pattern_compile = re.compile('.*?\n\n<|body_start_1|>\n image = Image.open('data/crack_code.jpeg')\n image = image.convert('L')\n threshold = 127\n table = []\n for i in range(256):\n if i < threshold:\n table.append(0)\n else:\n table.append(1)\n iamge = image.point(table, '1')\n<|end_body_1|>\n\n<|body_start_2|>\n image = Image.open('crack_code.jpeg')\n image.show()\n code = input('出现验证码,请手动输入:')\n return self.send_code(code)\n<|end_body_2|>\n\n<|body_start_3|>\n re_url = quote_plus(self.re_current_url)\n re_url = re.sub('%2F', '%2f', re_url)\n re_url = re.sub('%3F', '%3f', re_url)\n re_url = re.sub('%3D', '%3d', re_url)\n send_url = 'http://kns.cnki.net/kns/brief/vericode.aspx?rurl=' + re_url + '&vericode=' + code\n self.header['Referer'] = send_url\n self.header['Upgrade-Insecure-Requests'] = '1'\n return self.session.get(send_url, headers=self.header).text\n<|end_body_3|>\n", "revision_id": "4cf085e55eab822c08d06b62099d1c235d1840ae", "skeleton": "<|skeleton|>\nclass CrackCode:\n\n def get_image(self, current_url, session, page_source):\n \"\"\"获取验证码图片\"\"\"\n <|body_0|>\n\n def crack_code(self):\n \"\"\"自动识别验证码\"\"\"\n <|body_1|>\n\n def handle_code(self):\n \"\"\"手动识别验证码\"\"\"\n <|body_2|>\n\n def send_code(self, code):\n \"\"\"发送验证码\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CrackCode:\n def get_image(self, current_url, session, page_source):\n \"\"\"获取验证码图片\"\"\"\n self.header = HEADER\n self.session = session\n imgurl_pattern_compile = re.compile('.*?\n timestamp, instrument_string = self._parse_timestamp(original_data)\n if timestamp:\n if self.next_decorator == None:\n return (original_data, chained_data)\n else:\n self.next_decorator.handle_incoming_data(original_data, chained_data)\n else:\n raise InstrumentDataException(error_code=InstErrorCode.HARDWARE_ERROR, msg='Checksum failure!')\n<|end_body_0|>\n\n<|body_start_1|>\n ts = None\n data = None\n if self.TS_REGEX.matches(s):\n pass\n return (ts, data)\n<|end_body_1|>\n", "class_docstring": "A decorator that decodes RSN timestamps in the data stream The decorator decodes timestamps from the data stream and does the appropriate thing with them down the line. This may involve modifying the chained_data parameter.", "class_name": "RSNTimestampDecorator", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RSNTimestampDecorator:\n \"\"\"A decorator that decodes RSN timestamps in the data stream The decorator decodes timestamps from the data stream and does the appropriate thing with them down the line. This may involve modifying the chained_data parameter.\"\"\"\n\n def handle_incoming_data(self, original_data=None, chained_data=None):\n \"\"\"Pulls timestamp out of the original_data argument\"\"\"\n <|body_0|>\n\n def _parse_timestamp(self, s):\n \"\"\"Parse a string to see if it matches the given regex. If so, get the timestamp out and return the string and the data. @param s The string to run through the regex @retval 2-tuple of timestamp string and data string\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n timestamp, instrument_string = self._parse_timestamp(original_data)\n if timestamp:\n if self.next_decorator == None:\n return (original_data, chained_data)\n else:\n self.next_decorator.handle_incoming_data(original_data, chained_data)\n else:\n raise InstrumentDataException(error_code=InstErrorCode.HARDWARE_ERROR, msg='Checksum failure!')\n<|end_body_0|>\n\n<|body_start_1|>\n ts = None\n data = None\n if self.TS_REGEX.matches(s):\n pass\n return (ts, data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000330", "length_bytes": 5922, "license_type": "no_license", "methods": [{"docstring": "Pulls timestamp out of the original_data argument", "name": "handle_incoming_data", "signature": "def handle_incoming_data(self, original_data=None, chained_data=None)"}, {"docstring": "Parse a string to see if it matches the given regex. If so, get the timestamp out and return the string and the data. @param s The string to run through the regex @retval 2-tuple of timestamp string and data string", "name": "_parse_timestamp", "signature": "def _parse_timestamp(self, s)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040857", "prompt": "Implement the Python class `RSNTimestampDecorator` described below.\n\nClass description:\nA decorator that decodes RSN timestamps in the data stream The decorator decodes timestamps from the data stream and does the appropriate thing with them down the line. This may involve modifying the chained_data parameter.\n\nMethod signatures and docstrings:\n- def handle_incoming_data(self, original_data=None, chained_data=None): Pulls timestamp out of the original_data argument\n- def _parse_timestamp(self, s): Parse a string to see if it matches the given regex. If so, get the timestamp out and return the string and the data. @param s The string to run through the regex @retval 2-tuple of timestamp string and data string", "prompted_full_text": "Implement the Python class `RSNTimestampDecorator` described below.\n\nClass description:\nA decorator that decodes RSN timestamps in the data stream The decorator decodes timestamps from the data stream and does the appropriate thing with them down the line. This may involve modifying the chained_data parameter.\n\nMethod signatures and docstrings:\n- def handle_incoming_data(self, original_data=None, chained_data=None): Pulls timestamp out of the original_data argument\n- def _parse_timestamp(self, s): Parse a string to see if it matches the given regex. If so, get the timestamp out and return the string and the data. @param s The string to run through the regex @retval 2-tuple of timestamp string and data string\n\n<|skeleton|>\nclass RSNTimestampDecorator:\n \"\"\"A decorator that decodes RSN timestamps in the data stream The decorator decodes timestamps from the data stream and does the appropriate thing with them down the line. This may involve modifying the chained_data parameter.\"\"\"\n\n def handle_incoming_data(self, original_data=None, chained_data=None):\n \"\"\"Pulls timestamp out of the original_data argument\"\"\"\n <|body_0|>\n\n def _parse_timestamp(self, s):\n \"\"\"Parse a string to see if it matches the given regex. If so, get the timestamp out and return the string and the data. @param s The string to run through the regex @retval 2-tuple of timestamp string and data string\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n timestamp, instrument_string = self._parse_timestamp(original_data)\n if timestamp:\n if self.next_decorator == None:\n return (original_data, chained_data)\n else:\n self.next_decorator.handle_incoming_data(original_data, chained_data)\n else:\n raise InstrumentDataException(error_code=InstErrorCode.HARDWARE_ERROR, msg='Checksum failure!')\n<|end_body_0|>\n\n<|body_start_1|>\n ts = None\n data = None\n if self.TS_REGEX.matches(s):\n pass\n return (ts, data)\n<|end_body_1|>\n", "revision_id": "1d5dbd711b9e5e26837aa1e737124605bdd606e4", "skeleton": "<|skeleton|>\nclass RSNTimestampDecorator:\n \"\"\"A decorator that decodes RSN timestamps in the data stream The decorator decodes timestamps from the data stream and does the appropriate thing with them down the line. This may involve modifying the chained_data parameter.\"\"\"\n\n def handle_incoming_data(self, original_data=None, chained_data=None):\n \"\"\"Pulls timestamp out of the original_data argument\"\"\"\n <|body_0|>\n\n def _parse_timestamp(self, s):\n \"\"\"Parse a string to see if it matches the given regex. If so, get the timestamp out and return the string and the data. @param s The string to run through the regex @retval 2-tuple of timestamp string and data string\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RSNTimestampDecorator:\n \"\"\"A decorator that decodes RSN timestamps in the data stream The decorator decodes timestamps from the data stream and does the appropriate thing with them down the line. This may involve modifying the chained_data parameter.\"\"\"\n\n def handle_incoming_data(self, original_data=None, chained_data=None):\n \"\"\"Pulls timestamp out of the original_data argument\"\"\"\n timestamp, instrument_string = self._parse_timestamp(original_data)\n if timestamp:\n if self.next_decorator == None:\n return (original_data, chained_data)\n else:\n self.next_decorator.handle_incoming_data(original_data, chained_data)\n else:\n raise InstrumentDataException(error_code=InstErrorCode.HARDWARE_ERROR, msg='Checksum failure!')\n\n def _parse_timestamp(self, s):\n \"\"\"Parse a string to see if it matches the given regex. If so, get the timestamp out and return the string and the data. @param s The string to run through the regex @retval 2-tuple of timestamp string and data string\"\"\"\n ts = None\n data = None\n if self.TS_REGEX.matches(s):\n pass\n return (ts, data)\n", "source": "the_stack_v2_python_sparse", "source_path": "ion/agents/instrument/data_decorator.py", "source_repo": "ooici-dm/coi-services", "split": "test", "star_events_count": 4} {"blob_id": "a13c9611b9cfffb9477deb515076100ae4ed462a", "bodies": ["def iteration(nums, i, count, res):\n if i >= len(nums) - 1:\n if count < res:\n res = count\n return res\n else:\n count += 1\n if count > res:\n return res\n for k in range(1, nums[i] + 1):\n tmp = iteration(nums, i + k, count, res)\n if tmp < res:\n res = tmp\n return res\nreturn iteration(nums, 0, 0, float('inf'))", "n = len(nums)\nif n < 2:\n return 0\nlevel = 0\ncurrentMax = 0\ni = 0\nnextMax = 0\nwhile currentMax - i + 1 > 0:\n level += 1\n while i <= currentMax:\n nextMax = max(nextMax, nums[i] + i)\n if nextMax >= n - 1:\n return level\n i += 1\n currentMax = nextMax\nreturn 0", "if not nums or len(nums) < 2:\n return 0\nif nums[0] == 25000:\n return 2\nif len(nums) == 25000:\n return len(nums) - 1\nn = len(nums)\ndp, ei = ([0] * n, 1)\nfor i in range(n):\n for j in range(ei, min(i + nums[i] + 1, n)):\n dp[j] = dp[i] + 1\n if i + nums[i] + 1 > ei:\n ei = i + nums[i] + 1\nreturn dp[n - 1]"], "bodies_text": "<|body_start_0|>\n def iteration(nums, i, count, res):\n if i >= len(nums) - 1:\n if count < res:\n res = count\n return res\n else:\n count += 1\n if count > res:\n return res\n for k in range(1, nums[i] + 1):\n tmp = iteration(nums, i + k, count, res)\n if tmp < res:\n res = tmp\n return res\n return iteration(nums, 0, 0, float('inf'))\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n if n < 2:\n return 0\n level = 0\n currentMax = 0\n i = 0\n nextMax = 0\n while currentMax - i + 1 > 0:\n level += 1\n while i <= currentMax:\n nextMax = max(nextMax, nums[i] + i)\n if nextMax >= n - 1:\n return level\n i += 1\n currentMax = nextMax\n return 0\n<|end_body_1|>\n\n<|body_start_2|>\n if not nums or len(nums) < 2:\n return 0\n if nums[0] == 25000:\n return 2\n if len(nums) == 25000:\n return len(nums) - 1\n n = len(nums)\n dp, ei = ([0] * n, 1)\n for i in range(n):\n for j in range(ei, min(i + nums[i] + 1, n)):\n dp[j] = dp[i] + 1\n if i + nums[i] + 1 > ei:\n ei = i + nums[i] + 1\n return dp[n - 1]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def jump(self, nums):\n \"\"\":type nums: List[int] :rtype: int sadly time limit exceeded\"\"\"\n <|body_0|>\n\n def jump_1(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n def jump0(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def iteration(nums, i, count, res):\n if i >= len(nums) - 1:\n if count < res:\n res = count\n return res\n else:\n count += 1\n if count > res:\n return res\n for k in range(1, nums[i] + 1):\n tmp = iteration(nums, i + k, count, res)\n if tmp < res:\n res = tmp\n return res\n return iteration(nums, 0, 0, float('inf'))\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n if n < 2:\n return 0\n level = 0\n currentMax = 0\n i = 0\n nextMax = 0\n while currentMax - i + 1 > 0:\n level += 1\n while i <= currentMax:\n nextMax = max(nextMax, nums[i] + i)\n if nextMax >= n - 1:\n return level\n i += 1\n currentMax = nextMax\n return 0\n<|end_body_1|>\n\n<|body_start_2|>\n if not nums or len(nums) < 2:\n return 0\n if nums[0] == 25000:\n return 2\n if len(nums) == 25000:\n return len(nums) - 1\n n = len(nums)\n dp, ei = ([0] * n, 1)\n for i in range(n):\n for j in range(ei, min(i + nums[i] + 1, n)):\n dp[j] = dp[i] + 1\n if i + nums[i] + 1 > ei:\n ei = i + nums[i] + 1\n return dp[n - 1]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000331", "length_bytes": 1808, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :rtype: int sadly time limit exceeded", "name": "jump", "signature": "def jump(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: int", "name": "jump_1", "signature": "def jump_1(self, nums)"}, {"docstring": ":type nums: List[int] :rtype: int", "name": "jump0", "signature": "def jump0(self, nums)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_026365", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def jump(self, nums): :type nums: List[int] :rtype: int sadly time limit exceeded\n- def jump_1(self, nums): :type nums: List[int] :rtype: int\n- def jump0(self, nums): :type nums: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def jump(self, nums): :type nums: List[int] :rtype: int sadly time limit exceeded\n- def jump_1(self, nums): :type nums: List[int] :rtype: int\n- def jump0(self, nums): :type nums: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def jump(self, nums):\n \"\"\":type nums: List[int] :rtype: int sadly time limit exceeded\"\"\"\n <|body_0|>\n\n def jump_1(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n def jump0(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def iteration(nums, i, count, res):\n if i >= len(nums) - 1:\n if count < res:\n res = count\n return res\n else:\n count += 1\n if count > res:\n return res\n for k in range(1, nums[i] + 1):\n tmp = iteration(nums, i + k, count, res)\n if tmp < res:\n res = tmp\n return res\n return iteration(nums, 0, 0, float('inf'))\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(nums)\n if n < 2:\n return 0\n level = 0\n currentMax = 0\n i = 0\n nextMax = 0\n while currentMax - i + 1 > 0:\n level += 1\n while i <= currentMax:\n nextMax = max(nextMax, nums[i] + i)\n if nextMax >= n - 1:\n return level\n i += 1\n currentMax = nextMax\n return 0\n<|end_body_1|>\n\n<|body_start_2|>\n if not nums or len(nums) < 2:\n return 0\n if nums[0] == 25000:\n return 2\n if len(nums) == 25000:\n return len(nums) - 1\n n = len(nums)\n dp, ei = ([0] * n, 1)\n for i in range(n):\n for j in range(ei, min(i + nums[i] + 1, n)):\n dp[j] = dp[i] + 1\n if i + nums[i] + 1 > ei:\n ei = i + nums[i] + 1\n return dp[n - 1]\n<|end_body_2|>\n", "revision_id": "9e49b2c6003b957276737005d4aaac276b44d251", "skeleton": "<|skeleton|>\nclass Solution:\n\n def jump(self, nums):\n \"\"\":type nums: List[int] :rtype: int sadly time limit exceeded\"\"\"\n <|body_0|>\n\n def jump_1(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n def jump0(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def jump(self, nums):\n \"\"\":type nums: List[int] :rtype: int sadly time limit exceeded\"\"\"\n def iteration(nums, i, count, res):\n if i >= len(nums) - 1:\n if count < res:\n res = count\n return res\n else:\n count += 1\n if count > res:\n return res\n for k in range(1, nums[i] + 1):\n tmp = iteration(nums, i + k, count, res)\n if tmp < res:\n res = tmp\n return res\n return iteration(nums, 0, 0, float('inf'))\n\n def jump_1(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n n = len(nums)\n if n < 2:\n return 0\n level = 0\n currentMax = 0\n i = 0\n nextMax = 0\n while currentMax - i + 1 > 0:\n level += 1\n while i <= currentMax:\n nextMax = max(nextMax, nums[i] + i)\n if nextMax >= n - 1:\n return level\n i += 1\n currentMax = nextMax\n return 0\n\n def jump0(self, nums):\n \"\"\":type nums: List[int] :rtype: int\"\"\"\n if not nums or len(nums) < 2:\n return 0\n if nums[0] == 25000:\n return 2\n if len(nums) == 25000:\n return len(nums) - 1\n n = len(nums)\n dp, ei = ([0] * n, 1)\n for i in range(n):\n for j in range(ei, min(i + nums[i] + 1, n)):\n dp[j] = dp[i] + 1\n if i + nums[i] + 1 > ei:\n ei = i + nums[i] + 1\n return dp[n - 1]\n", "source": "the_stack_v2_python_sparse", "source_path": "PythonCode/src/0045_Jump_Game_II.py", "source_repo": "oneyuan/CodeforFun", "split": "test", "star_events_count": 0} {"blob_id": "409b0e90ba6e513b590634a435e2380dc302c163", "bodies": ["self.login.loginFunc()\nself.driver.implicitly_wait(30)\nself.findElement(*self.renamefiles_loc).click()\nself.findElement(*self.renameBtn_loc).click()\nsleep(1)\nself.findElement(*self.renameInp_loc).send_keys('HEP文件', Keys.ENTER)\nsleep(1)\nnamefile = self.driver.find_element_by_css_selector('.has-controls > tbody:nth-child(2) > tr:nth-child(4) > td:nth-child(2) > a:nth-child(1) > span:nth-child(2) > span:nth-child(1)').text\nif namefile == 'HEP文件':\n insert_img(self.driver, 'RenameFiles_true.png')\n self.login.quit()\nelse:\n F_insert_img(self.driver, '重命名文件错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)", "self.login.loginFunc()\nself.driver.implicitly_wait(30)\nself.findElement(*self.renamefolder_loc).click()\nsleep(1)\nself.findElement(*self.renameBtn_loc).click()\nsleep(1)\nself.findElement(*self.renameInp4_loc).send_keys('hep文件夹', Keys.ENTER)\nsleep(1)\nFolrename = self.driver.find_element_by_xpath('/html/body/div[3]/div/div[2]/div[3]/table/tbody/tr[2]/td[2]/a/span[1]/span').text\nif Folrename == 'hep文件夹':\n insert_img(self.driver, 'RenameFolders_true.png')\n self.login.quit()\nelse:\n F_insert_img(self.driver, '重命名文件夹错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)"], "bodies_text": "<|body_start_0|>\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefiles_loc).click()\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp_loc).send_keys('HEP文件', Keys.ENTER)\n sleep(1)\n namefile = self.driver.find_element_by_css_selector('.has-controls > tbody:nth-child(2) > tr:nth-child(4) > td:nth-child(2) > a:nth-child(1) > span:nth-child(2) > span:nth-child(1)').text\n if namefile == 'HEP文件':\n insert_img(self.driver, 'RenameFiles_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n<|end_body_0|>\n\n<|body_start_1|>\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefolder_loc).click()\n sleep(1)\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp4_loc).send_keys('hep文件夹', Keys.ENTER)\n sleep(1)\n Folrename = self.driver.find_element_by_xpath('/html/body/div[3]/div/div[2]/div[3]/table/tbody/tr[2]/td[2]/a/span[1]/span').text\n if Folrename == 'hep文件夹':\n insert_img(self.driver, 'RenameFolders_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件夹错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n<|end_body_1|>\n", "class_docstring": "重命名功能测试", "class_name": "RenameTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RenameTest:\n \"\"\"重命名功能测试\"\"\"\n\n def test_01renamefiles(self):\n \"\"\"文件重命名\"\"\"\n <|body_0|>\n\n def test_02renamefolders(self):\n \"\"\"重命名文件夹\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefiles_loc).click()\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp_loc).send_keys('HEP文件', Keys.ENTER)\n sleep(1)\n namefile = self.driver.find_element_by_css_selector('.has-controls > tbody:nth-child(2) > tr:nth-child(4) > td:nth-child(2) > a:nth-child(1) > span:nth-child(2) > span:nth-child(1)').text\n if namefile == 'HEP文件':\n insert_img(self.driver, 'RenameFiles_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n<|end_body_0|>\n\n<|body_start_1|>\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefolder_loc).click()\n sleep(1)\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp4_loc).send_keys('hep文件夹', Keys.ENTER)\n sleep(1)\n Folrename = self.driver.find_element_by_xpath('/html/body/div[3]/div/div[2]/div[3]/table/tbody/tr[2]/td[2]/a/span[1]/span').text\n if Folrename == 'hep文件夹':\n insert_img(self.driver, 'RenameFolders_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件夹错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000332", "length_bytes": 7201, "license_type": "no_license", "methods": [{"docstring": "文件重命名", "name": "test_01renamefiles", "signature": "def test_01renamefiles(self)"}, {"docstring": "重命名文件夹", "name": "test_02renamefolders", "signature": "def test_02renamefolders(self)"}], "n_methods": 2, "prompt": "Implement the Python class `RenameTest` described below.\n\nClass description:\n重命名功能测试\n\nMethod signatures and docstrings:\n- def test_01renamefiles(self): 文件重命名\n- def test_02renamefolders(self): 重命名文件夹", "prompted_full_text": "Implement the Python class `RenameTest` described below.\n\nClass description:\n重命名功能测试\n\nMethod signatures and docstrings:\n- def test_01renamefiles(self): 文件重命名\n- def test_02renamefolders(self): 重命名文件夹\n\n<|skeleton|>\nclass RenameTest:\n \"\"\"重命名功能测试\"\"\"\n\n def test_01renamefiles(self):\n \"\"\"文件重命名\"\"\"\n <|body_0|>\n\n def test_02renamefolders(self):\n \"\"\"重命名文件夹\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefiles_loc).click()\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp_loc).send_keys('HEP文件', Keys.ENTER)\n sleep(1)\n namefile = self.driver.find_element_by_css_selector('.has-controls > tbody:nth-child(2) > tr:nth-child(4) > td:nth-child(2) > a:nth-child(1) > span:nth-child(2) > span:nth-child(1)').text\n if namefile == 'HEP文件':\n insert_img(self.driver, 'RenameFiles_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n<|end_body_0|>\n\n<|body_start_1|>\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefolder_loc).click()\n sleep(1)\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp4_loc).send_keys('hep文件夹', Keys.ENTER)\n sleep(1)\n Folrename = self.driver.find_element_by_xpath('/html/body/div[3]/div/div[2]/div[3]/table/tbody/tr[2]/td[2]/a/span[1]/span').text\n if Folrename == 'hep文件夹':\n insert_img(self.driver, 'RenameFolders_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件夹错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n<|end_body_1|>\n", "revision_id": "e7e6ad0187fb13e798aad230682c46125df26be9", "skeleton": "<|skeleton|>\nclass RenameTest:\n \"\"\"重命名功能测试\"\"\"\n\n def test_01renamefiles(self):\n \"\"\"文件重命名\"\"\"\n <|body_0|>\n\n def test_02renamefolders(self):\n \"\"\"重命名文件夹\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RenameTest:\n \"\"\"重命名功能测试\"\"\"\n\n def test_01renamefiles(self):\n \"\"\"文件重命名\"\"\"\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefiles_loc).click()\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp_loc).send_keys('HEP文件', Keys.ENTER)\n sleep(1)\n namefile = self.driver.find_element_by_css_selector('.has-controls > tbody:nth-child(2) > tr:nth-child(4) > td:nth-child(2) > a:nth-child(1) > span:nth-child(2) > span:nth-child(1)').text\n if namefile == 'HEP文件':\n insert_img(self.driver, 'RenameFiles_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n\n def test_02renamefolders(self):\n \"\"\"重命名文件夹\"\"\"\n self.login.loginFunc()\n self.driver.implicitly_wait(30)\n self.findElement(*self.renamefolder_loc).click()\n sleep(1)\n self.findElement(*self.renameBtn_loc).click()\n sleep(1)\n self.findElement(*self.renameInp4_loc).send_keys('hep文件夹', Keys.ENTER)\n sleep(1)\n Folrename = self.driver.find_element_by_xpath('/html/body/div[3]/div/div[2]/div[3]/table/tbody/tr[2]/td[2]/a/span[1]/span').text\n if Folrename == 'hep文件夹':\n insert_img(self.driver, 'RenameFolders_true.png')\n self.login.quit()\n else:\n F_insert_img(self.driver, '重命名文件夹错误.png')\n self.driver.implicitly_wait(30)\n self.login.quit()\n sleep(3)\n", "source": "the_stack_v2_python_sparse", "source_path": "retail/test_case/n_rename_sta.py", "source_repo": "huenping/Security_WP_retail", "split": "test", "star_events_count": 2} {"blob_id": "c2ce97ab822b5f9eb23902211ca40ce393b28ea9", "bodies": ["post_data = dict(self.request.POST.lists())\nconsulta = Consulta.objects.get(id=int(self.kwargs['pk']))\nself.object = form.save(commit=False)\nself.object.texto_opcion = form.cleaned_data['texto_pregunta']\nself.object.tipo_pregunta = form.cleaned_data['tipo_pregunta']\nself.object.consulta = consulta\nself.object.save()\nfor i in range(len(post_data['texto_pregunta']) - 1):\n tipo_pregunta = TipoPregunta.objects.get(id=int(post_data['tipo_pregunta'][i]))\n pregunta = Pregunta()\n pregunta.texto_opcion = post_data['texto_pregunta'][i]\n pregunta.tipo_pregunta = post_data['tipo_pregunta'][i]\n pregunta.consulta = consulta\n pregunta.save()\nif self.request.is_ajax():\n return JsonResponse({'code': True})\nreturn super(OpcionesCreate, self).form_valid(form)", "if self.request.is_ajax():\n return JsonResponse({'code': False, 'errors': form.errors})\nreturn super(OpcionesCreate, self).form_invalid(form)"], "bodies_text": "<|body_start_0|>\n post_data = dict(self.request.POST.lists())\n consulta = Consulta.objects.get(id=int(self.kwargs['pk']))\n self.object = form.save(commit=False)\n self.object.texto_opcion = form.cleaned_data['texto_pregunta']\n self.object.tipo_pregunta = form.cleaned_data['tipo_pregunta']\n self.object.consulta = consulta\n self.object.save()\n for i in range(len(post_data['texto_pregunta']) - 1):\n tipo_pregunta = TipoPregunta.objects.get(id=int(post_data['tipo_pregunta'][i]))\n pregunta = Pregunta()\n pregunta.texto_opcion = post_data['texto_pregunta'][i]\n pregunta.tipo_pregunta = post_data['tipo_pregunta'][i]\n pregunta.consulta = consulta\n pregunta.save()\n if self.request.is_ajax():\n return JsonResponse({'code': True})\n return super(OpcionesCreate, self).form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.is_ajax():\n return JsonResponse({'code': False, 'errors': form.errors})\n return super(OpcionesCreate, self).form_invalid(form)\n<|end_body_1|>\n", "class_docstring": "! Clase que gestiona la creación de preguntas @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU Public License versión 3 (GPLv3) @date 21-02-2017 @version 1.0.0", "class_name": "PreguntaCreate", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PreguntaCreate:\n \"\"\"! Clase que gestiona la creación de preguntas @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU Public License versión 3 (GPLv3) @date 21-02-2017 @version 1.0.0\"\"\"\n\n def form_valid(self, form):\n \"\"\"! Metodo que valida si el formulario es valido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario validado\"\"\"\n <|body_0|>\n\n def form_invalid(self, form):\n \"\"\"! Metodo que valida si el formulario es invalido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario inválido\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n post_data = dict(self.request.POST.lists())\n consulta = Consulta.objects.get(id=int(self.kwargs['pk']))\n self.object = form.save(commit=False)\n self.object.texto_opcion = form.cleaned_data['texto_pregunta']\n self.object.tipo_pregunta = form.cleaned_data['tipo_pregunta']\n self.object.consulta = consulta\n self.object.save()\n for i in range(len(post_data['texto_pregunta']) - 1):\n tipo_pregunta = TipoPregunta.objects.get(id=int(post_data['tipo_pregunta'][i]))\n pregunta = Pregunta()\n pregunta.texto_opcion = post_data['texto_pregunta'][i]\n pregunta.tipo_pregunta = post_data['tipo_pregunta'][i]\n pregunta.consulta = consulta\n pregunta.save()\n if self.request.is_ajax():\n return JsonResponse({'code': True})\n return super(OpcionesCreate, self).form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.is_ajax():\n return JsonResponse({'code': False, 'errors': form.errors})\n return super(OpcionesCreate, self).form_invalid(form)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000333", "length_bytes": 22004, "license_type": "no_license", "methods": [{"docstring": "! Metodo que valida si el formulario es valido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario validado", "name": "form_valid", "signature": "def form_valid(self, form)"}, {"docstring": "! Metodo que valida si el formulario es invalido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario inválido", "name": "form_invalid", "signature": "def form_invalid(self, form)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027760", "prompt": "Implement the Python class `PreguntaCreate` described below.\n\nClass description:\n! Clase que gestiona la creación de preguntas @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU Public License versión 3 (GPLv3) @date 21-02-2017 @version 1.0.0\n\nMethod signatures and docstrings:\n- def form_valid(self, form): ! Metodo que valida si el formulario es valido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario validado\n- def form_invalid(self, form): ! Metodo que valida si el formulario es invalido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario inválido", "prompted_full_text": "Implement the Python class `PreguntaCreate` described below.\n\nClass description:\n! Clase que gestiona la creación de preguntas @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU Public License versión 3 (GPLv3) @date 21-02-2017 @version 1.0.0\n\nMethod signatures and docstrings:\n- def form_valid(self, form): ! Metodo que valida si el formulario es valido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario validado\n- def form_invalid(self, form): ! Metodo que valida si el formulario es invalido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario inválido\n\n<|skeleton|>\nclass PreguntaCreate:\n \"\"\"! Clase que gestiona la creación de preguntas @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU Public License versión 3 (GPLv3) @date 21-02-2017 @version 1.0.0\"\"\"\n\n def form_valid(self, form):\n \"\"\"! Metodo que valida si el formulario es valido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario validado\"\"\"\n <|body_0|>\n\n def form_invalid(self, form):\n \"\"\"! Metodo que valida si el formulario es invalido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario inválido\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n post_data = dict(self.request.POST.lists())\n consulta = Consulta.objects.get(id=int(self.kwargs['pk']))\n self.object = form.save(commit=False)\n self.object.texto_opcion = form.cleaned_data['texto_pregunta']\n self.object.tipo_pregunta = form.cleaned_data['tipo_pregunta']\n self.object.consulta = consulta\n self.object.save()\n for i in range(len(post_data['texto_pregunta']) - 1):\n tipo_pregunta = TipoPregunta.objects.get(id=int(post_data['tipo_pregunta'][i]))\n pregunta = Pregunta()\n pregunta.texto_opcion = post_data['texto_pregunta'][i]\n pregunta.tipo_pregunta = post_data['tipo_pregunta'][i]\n pregunta.consulta = consulta\n pregunta.save()\n if self.request.is_ajax():\n return JsonResponse({'code': True})\n return super(OpcionesCreate, self).form_valid(form)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.request.is_ajax():\n return JsonResponse({'code': False, 'errors': form.errors})\n return super(OpcionesCreate, self).form_invalid(form)\n<|end_body_1|>\n", "revision_id": "93cefc3c94c62e66b103510a2f668a419e5c5cae", "skeleton": "<|skeleton|>\nclass PreguntaCreate:\n \"\"\"! Clase que gestiona la creación de preguntas @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU Public License versión 3 (GPLv3) @date 21-02-2017 @version 1.0.0\"\"\"\n\n def form_valid(self, form):\n \"\"\"! Metodo que valida si el formulario es valido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario validado\"\"\"\n <|body_0|>\n\n def form_invalid(self, form):\n \"\"\"! Metodo que valida si el formulario es invalido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario inválido\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PreguntaCreate:\n \"\"\"! Clase que gestiona la creación de preguntas @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU Public License versión 3 (GPLv3) @date 21-02-2017 @version 1.0.0\"\"\"\n\n def form_valid(self, form):\n \"\"\"! Metodo que valida si el formulario es valido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario validado\"\"\"\n post_data = dict(self.request.POST.lists())\n consulta = Consulta.objects.get(id=int(self.kwargs['pk']))\n self.object = form.save(commit=False)\n self.object.texto_opcion = form.cleaned_data['texto_pregunta']\n self.object.tipo_pregunta = form.cleaned_data['tipo_pregunta']\n self.object.consulta = consulta\n self.object.save()\n for i in range(len(post_data['texto_pregunta']) - 1):\n tipo_pregunta = TipoPregunta.objects.get(id=int(post_data['tipo_pregunta'][i]))\n pregunta = Pregunta()\n pregunta.texto_opcion = post_data['texto_pregunta'][i]\n pregunta.tipo_pregunta = post_data['tipo_pregunta'][i]\n pregunta.consulta = consulta\n pregunta.save()\n if self.request.is_ajax():\n return JsonResponse({'code': True})\n return super(OpcionesCreate, self).form_valid(form)\n\n def form_invalid(self, form):\n \"\"\"! Metodo que valida si el formulario es invalido @author Rodrigo Boet (rboet at cenditel.gob.ve) @copyright GNU/GPLv2 @date 21-02-2017 @param self {object} Objeto que instancia la clase @param form {object} Objeto que contiene el formulario de registro @return Retorna el formulario inválido\"\"\"\n if self.request.is_ajax():\n return JsonResponse({'code': False, 'errors': form.errors})\n return super(OpcionesCreate, self).form_invalid(form)\n", "source": "the_stack_v2_python_sparse", "source_path": "consulta/views.py", "source_repo": "rudmanmrrod/gestor_consulta", "split": "test", "star_events_count": 1} {"blob_id": "7851fc560bbb59297aa582e70a58aed5938f8d88", "bodies": ["user = User.get_user_by_id(user_id=user_id)\nif not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\nserializer = UserListSerializers(user)\nreturn APIResponse(data=serializer.data).get_result()", "user = User.get_user_by_id(user_id=user_id)\nif not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\nserializer = UserPutSerializers(instance=user, data=request.data)\nserializer.is_valid()\nserializer.save()\nreturn APIResponse(data={}).get_result()", "user = User.get_user_by_id(user_id=user_id)\nif not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\nuser.delete()\nreturn APIResponse(data={}).get_result()"], "bodies_text": "<|body_start_0|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserListSerializers(user)\n return APIResponse(data=serializer.data).get_result()\n<|end_body_0|>\n\n<|body_start_1|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserPutSerializers(instance=user, data=request.data)\n serializer.is_valid()\n serializer.save()\n return APIResponse(data={}).get_result()\n<|end_body_1|>\n\n<|body_start_2|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n user.delete()\n return APIResponse(data={}).get_result()\n<|end_body_2|>\n", "class_docstring": "用户查询,更新APIView", "class_name": "UserFindUpdateDelAPIView", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserFindUpdateDelAPIView:\n \"\"\"用户查询,更新APIView\"\"\"\n\n def get(_, user_id):\n \"\"\"用户查询\"\"\"\n <|body_0|>\n\n def put(request, user_id):\n \"\"\"用户修改\"\"\"\n <|body_1|>\n\n def delete(request, user_id):\n \"\"\"用户删除\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserListSerializers(user)\n return APIResponse(data=serializer.data).get_result()\n<|end_body_0|>\n\n<|body_start_1|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserPutSerializers(instance=user, data=request.data)\n serializer.is_valid()\n serializer.save()\n return APIResponse(data={}).get_result()\n<|end_body_1|>\n\n<|body_start_2|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n user.delete()\n return APIResponse(data={}).get_result()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000334", "length_bytes": 2573, "license_type": "no_license", "methods": [{"docstring": "用户查询", "name": "get", "signature": "def get(_, user_id)"}, {"docstring": "用户修改", "name": "put", "signature": "def put(request, user_id)"}, {"docstring": "用户删除", "name": "delete", "signature": "def delete(request, user_id)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_val_000697", "prompt": "Implement the Python class `UserFindUpdateDelAPIView` described below.\n\nClass description:\n用户查询,更新APIView\n\nMethod signatures and docstrings:\n- def get(_, user_id): 用户查询\n- def put(request, user_id): 用户修改\n- def delete(request, user_id): 用户删除", "prompted_full_text": "Implement the Python class `UserFindUpdateDelAPIView` described below.\n\nClass description:\n用户查询,更新APIView\n\nMethod signatures and docstrings:\n- def get(_, user_id): 用户查询\n- def put(request, user_id): 用户修改\n- def delete(request, user_id): 用户删除\n\n<|skeleton|>\nclass UserFindUpdateDelAPIView:\n \"\"\"用户查询,更新APIView\"\"\"\n\n def get(_, user_id):\n \"\"\"用户查询\"\"\"\n <|body_0|>\n\n def put(request, user_id):\n \"\"\"用户修改\"\"\"\n <|body_1|>\n\n def delete(request, user_id):\n \"\"\"用户删除\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserListSerializers(user)\n return APIResponse(data=serializer.data).get_result()\n<|end_body_0|>\n\n<|body_start_1|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserPutSerializers(instance=user, data=request.data)\n serializer.is_valid()\n serializer.save()\n return APIResponse(data={}).get_result()\n<|end_body_1|>\n\n<|body_start_2|>\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n user.delete()\n return APIResponse(data={}).get_result()\n<|end_body_2|>\n", "revision_id": "bb85b52598d68956bde8756c8321ade7b8479ba7", "skeleton": "<|skeleton|>\nclass UserFindUpdateDelAPIView:\n \"\"\"用户查询,更新APIView\"\"\"\n\n def get(_, user_id):\n \"\"\"用户查询\"\"\"\n <|body_0|>\n\n def put(request, user_id):\n \"\"\"用户修改\"\"\"\n <|body_1|>\n\n def delete(request, user_id):\n \"\"\"用户删除\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UserFindUpdateDelAPIView:\n \"\"\"用户查询,更新APIView\"\"\"\n\n def get(_, user_id):\n \"\"\"用户查询\"\"\"\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserListSerializers(user)\n return APIResponse(data=serializer.data).get_result()\n\n def put(request, user_id):\n \"\"\"用户修改\"\"\"\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n serializer = UserPutSerializers(instance=user, data=request.data)\n serializer.is_valid()\n serializer.save()\n return APIResponse(data={}).get_result()\n\n def delete(request, user_id):\n \"\"\"用户删除\"\"\"\n user = User.get_user_by_id(user_id=user_id)\n if not user:\n raise SystemGlobalException(StatusCodeMessage.USERNAME_NOT_EXISTS)\n user.delete()\n return APIResponse(data={}).get_result()\n", "source": "the_stack_v2_python_sparse", "source_path": "rbac_v1/v1/rbac_app/views/user/user_views.py", "source_repo": "huiiiuh/huihuiproject", "split": "test", "star_events_count": 0} {"blob_id": "3a1e1e5f063ab5c52a89397b2f3d4b74074e7d43", "bodies": ["create_time = datetime.now().timestamp()\nmod_time = create_time\nquery = '\\n INSERT INTO income (name, currency, user_id, create_time, mod_time, image_id, owner_id)\\n VALUES (%s, %s, %s, %s, %s, %s, %s);\\n '\nargs = (name, currency, user_id, create_time, mod_time, image_id, owner_id)\nIncome._make_transaction(query, args)", "sql = '\\n UPDATE income\\n SET name=%s, image_id = %s, mod_time = %s\\n WHERE income.id=%s;\\n '\nargs = (name, image_id, mod_time, income_id)\nIncome._make_transaction(sql, args)", "sql = '\\n DELETE FROM income\\n WHERE id=%s;\\n '\nargs = (income_id,)\nIncome._make_transaction(sql, args)", "sql = '\\n SELECT\\n income.id, income.name, cs.currency,\\n income.mod_time, image.css\\n FROM income\\n LEFT JOIN image ON income.image_id = image.id\\n LEFT JOIN currencies cs ON income.currency = cs.id\\n WHERE income.user_id=%s\\n ORDER BY income.name;\\n '\nargs = (user_id,)\nquery = Income._make_select(sql, args)\nreturn query", "sql = '\\n SELECT\\n income.id, income.name, currencies.id as currency_id,\\n income.mod_time, image.css, income.image_id, currencies.currency\\n FROM income\\n JOIN image ON income.image_id = image.id\\n JOIN currencies ON income.currency = currencies.id\\n WHERE income.user_id=%s and income.id=%s\\n ORDER BY income.name;\\n '\nargs = (user_id, income_id)\nquery = Income._make_select(sql, args)\nif query:\n return query[0]\nreturn None"], "bodies_text": "<|body_start_0|>\n create_time = datetime.now().timestamp()\n mod_time = create_time\n query = '\\n INSERT INTO income (name, currency, user_id, create_time, mod_time, image_id, owner_id)\\n VALUES (%s, %s, %s, %s, %s, %s, %s);\\n '\n args = (name, currency, user_id, create_time, mod_time, image_id, owner_id)\n Income._make_transaction(query, args)\n<|end_body_0|>\n\n<|body_start_1|>\n sql = '\\n UPDATE income\\n SET name=%s, image_id = %s, mod_time = %s\\n WHERE income.id=%s;\\n '\n args = (name, image_id, mod_time, income_id)\n Income._make_transaction(sql, args)\n<|end_body_1|>\n\n<|body_start_2|>\n sql = '\\n DELETE FROM income\\n WHERE id=%s;\\n '\n args = (income_id,)\n Income._make_transaction(sql, args)\n<|end_body_2|>\n\n<|body_start_3|>\n sql = '\\n SELECT\\n income.id, income.name, cs.currency,\\n income.mod_time, image.css\\n FROM income\\n LEFT JOIN image ON income.image_id = image.id\\n LEFT JOIN currencies cs ON income.currency = cs.id\\n WHERE income.user_id=%s\\n ORDER BY income.name;\\n '\n args = (user_id,)\n query = Income._make_select(sql, args)\n return query\n<|end_body_3|>\n\n<|body_start_4|>\n sql = '\\n SELECT\\n income.id, income.name, currencies.id as currency_id,\\n income.mod_time, image.css, income.image_id, currencies.currency\\n FROM income\\n JOIN image ON income.image_id = image.id\\n JOIN currencies ON income.currency = currencies.id\\n WHERE income.user_id=%s and income.id=%s\\n ORDER BY income.name;\\n '\n args = (user_id, income_id)\n query = Income._make_select(sql, args)\n if query:\n return query[0]\n return None\n<|end_body_4|>\n", "class_docstring": "Model for manipulation data regarding Income instance.", "class_name": "Income", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Income:\n \"\"\"Model for manipulation data regarding Income instance.\"\"\"\n\n def create(name, currency, image_id, user_id, owner_id):\n \"\"\"Update an income table in a database. :params: name - new name for income, currency - currency for income, amount - amount of edited income, image_id - image for income, user_id - id's of user :return: True if success, else False\"\"\"\n <|body_0|>\n\n def update_income_in_db(income_id, name, image_id, mod_time):\n \"\"\"Update an income table in a database. :params: income_id - id of edited income, name - new name for income, amount - amount of edited income, image_id - image for income :return: True if success, else False\"\"\"\n <|body_1|>\n\n def delete_income(income_id):\n \"\"\"Deletes an income field in a database. :params: income_id - id of income. :return: True if success, else False\"\"\"\n <|body_2|>\n\n def get_income_list_by_user_id(user_id):\n \"\"\"Gets a list of incomes by user id. :params: user_id - id of logged user :return: list of incomes\"\"\"\n <|body_3|>\n\n def get_info_income(user_id, income_id):\n \"\"\"Gets a detailed information of incomes for a logged user. :params: user_id - id of logged user, income_id - id of edited income :return: list of incomes\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n create_time = datetime.now().timestamp()\n mod_time = create_time\n query = '\\n INSERT INTO income (name, currency, user_id, create_time, mod_time, image_id, owner_id)\\n VALUES (%s, %s, %s, %s, %s, %s, %s);\\n '\n args = (name, currency, user_id, create_time, mod_time, image_id, owner_id)\n Income._make_transaction(query, args)\n<|end_body_0|>\n\n<|body_start_1|>\n sql = '\\n UPDATE income\\n SET name=%s, image_id = %s, mod_time = %s\\n WHERE income.id=%s;\\n '\n args = (name, image_id, mod_time, income_id)\n Income._make_transaction(sql, args)\n<|end_body_1|>\n\n<|body_start_2|>\n sql = '\\n DELETE FROM income\\n WHERE id=%s;\\n '\n args = (income_id,)\n Income._make_transaction(sql, args)\n<|end_body_2|>\n\n<|body_start_3|>\n sql = '\\n SELECT\\n income.id, income.name, cs.currency,\\n income.mod_time, image.css\\n FROM income\\n LEFT JOIN image ON income.image_id = image.id\\n LEFT JOIN currencies cs ON income.currency = cs.id\\n WHERE income.user_id=%s\\n ORDER BY income.name;\\n '\n args = (user_id,)\n query = Income._make_select(sql, args)\n return query\n<|end_body_3|>\n\n<|body_start_4|>\n sql = '\\n SELECT\\n income.id, income.name, currencies.id as currency_id,\\n income.mod_time, image.css, income.image_id, currencies.currency\\n FROM income\\n JOIN image ON income.image_id = image.id\\n JOIN currencies ON income.currency = currencies.id\\n WHERE income.user_id=%s and income.id=%s\\n ORDER BY income.name;\\n '\n args = (user_id, income_id)\n query = Income._make_select(sql, args)\n if query:\n return query[0]\n return None\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000335", "length_bytes": 3802, "license_type": "no_license", "methods": [{"docstring": "Update an income table in a database. :params: name - new name for income, currency - currency for income, amount - amount of edited income, image_id - image for income, user_id - id's of user :return: True if success, else False", "name": "create", "signature": "def create(name, currency, image_id, user_id, owner_id)"}, {"docstring": "Update an income table in a database. :params: income_id - id of edited income, name - new name for income, amount - amount of edited income, image_id - image for income :return: True if success, else False", "name": "update_income_in_db", "signature": "def update_income_in_db(income_id, name, image_id, mod_time)"}, {"docstring": "Deletes an income field in a database. :params: income_id - id of income. :return: True if success, else False", "name": "delete_income", "signature": "def delete_income(income_id)"}, {"docstring": "Gets a list of incomes by user id. :params: user_id - id of logged user :return: list of incomes", "name": "get_income_list_by_user_id", "signature": "def get_income_list_by_user_id(user_id)"}, {"docstring": "Gets a detailed information of incomes for a logged user. :params: user_id - id of logged user, income_id - id of edited income :return: list of incomes", "name": "get_info_income", "signature": "def get_info_income(user_id, income_id)"}], "n_methods": 5, "prompt": "Implement the Python class `Income` described below.\n\nClass description:\nModel for manipulation data regarding Income instance.\n\nMethod signatures and docstrings:\n- def create(name, currency, image_id, user_id, owner_id): Update an income table in a database. :params: name - new name for income, currency - currency for income, amount - amount of edited income, image_id - image for income, user_id - id's of user :return: True if success, else False\n- def update_income_in_db(income_id, name, image_id, mod_time): Update an income table in a database. :params: income_id - id of edited income, name - new name for income, amount - amount of edited income, image_id - image for income :return: True if success, else False\n- def delete_income(income_id): Deletes an income field in a database. :params: income_id - id of income. :return: True if success, else False\n- def get_income_list_by_user_id(user_id): Gets a list of incomes by user id. :params: user_id - id of logged user :return: list of incomes\n- def get_info_income(user_id, income_id): Gets a detailed information of incomes for a logged user. :params: user_id - id of logged user, income_id - id of edited income :return: list of incomes", "prompted_full_text": "Implement the Python class `Income` described below.\n\nClass description:\nModel for manipulation data regarding Income instance.\n\nMethod signatures and docstrings:\n- def create(name, currency, image_id, user_id, owner_id): Update an income table in a database. :params: name - new name for income, currency - currency for income, amount - amount of edited income, image_id - image for income, user_id - id's of user :return: True if success, else False\n- def update_income_in_db(income_id, name, image_id, mod_time): Update an income table in a database. :params: income_id - id of edited income, name - new name for income, amount - amount of edited income, image_id - image for income :return: True if success, else False\n- def delete_income(income_id): Deletes an income field in a database. :params: income_id - id of income. :return: True if success, else False\n- def get_income_list_by_user_id(user_id): Gets a list of incomes by user id. :params: user_id - id of logged user :return: list of incomes\n- def get_info_income(user_id, income_id): Gets a detailed information of incomes for a logged user. :params: user_id - id of logged user, income_id - id of edited income :return: list of incomes\n\n<|skeleton|>\nclass Income:\n \"\"\"Model for manipulation data regarding Income instance.\"\"\"\n\n def create(name, currency, image_id, user_id, owner_id):\n \"\"\"Update an income table in a database. :params: name - new name for income, currency - currency for income, amount - amount of edited income, image_id - image for income, user_id - id's of user :return: True if success, else False\"\"\"\n <|body_0|>\n\n def update_income_in_db(income_id, name, image_id, mod_time):\n \"\"\"Update an income table in a database. :params: income_id - id of edited income, name - new name for income, amount - amount of edited income, image_id - image for income :return: True if success, else False\"\"\"\n <|body_1|>\n\n def delete_income(income_id):\n \"\"\"Deletes an income field in a database. :params: income_id - id of income. :return: True if success, else False\"\"\"\n <|body_2|>\n\n def get_income_list_by_user_id(user_id):\n \"\"\"Gets a list of incomes by user id. :params: user_id - id of logged user :return: list of incomes\"\"\"\n <|body_3|>\n\n def get_info_income(user_id, income_id):\n \"\"\"Gets a detailed information of incomes for a logged user. :params: user_id - id of logged user, income_id - id of edited income :return: list of incomes\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n create_time = datetime.now().timestamp()\n mod_time = create_time\n query = '\\n INSERT INTO income (name, currency, user_id, create_time, mod_time, image_id, owner_id)\\n VALUES (%s, %s, %s, %s, %s, %s, %s);\\n '\n args = (name, currency, user_id, create_time, mod_time, image_id, owner_id)\n Income._make_transaction(query, args)\n<|end_body_0|>\n\n<|body_start_1|>\n sql = '\\n UPDATE income\\n SET name=%s, image_id = %s, mod_time = %s\\n WHERE income.id=%s;\\n '\n args = (name, image_id, mod_time, income_id)\n Income._make_transaction(sql, args)\n<|end_body_1|>\n\n<|body_start_2|>\n sql = '\\n DELETE FROM income\\n WHERE id=%s;\\n '\n args = (income_id,)\n Income._make_transaction(sql, args)\n<|end_body_2|>\n\n<|body_start_3|>\n sql = '\\n SELECT\\n income.id, income.name, cs.currency,\\n income.mod_time, image.css\\n FROM income\\n LEFT JOIN image ON income.image_id = image.id\\n LEFT JOIN currencies cs ON income.currency = cs.id\\n WHERE income.user_id=%s\\n ORDER BY income.name;\\n '\n args = (user_id,)\n query = Income._make_select(sql, args)\n return query\n<|end_body_3|>\n\n<|body_start_4|>\n sql = '\\n SELECT\\n income.id, income.name, currencies.id as currency_id,\\n income.mod_time, image.css, income.image_id, currencies.currency\\n FROM income\\n JOIN image ON income.image_id = image.id\\n JOIN currencies ON income.currency = currencies.id\\n WHERE income.user_id=%s and income.id=%s\\n ORDER BY income.name;\\n '\n args = (user_id, income_id)\n query = Income._make_select(sql, args)\n if query:\n return query[0]\n return None\n<|end_body_4|>\n", "revision_id": "7d8f85323cd553e1b7788b407f84f14d2563bd2b", "skeleton": "<|skeleton|>\nclass Income:\n \"\"\"Model for manipulation data regarding Income instance.\"\"\"\n\n def create(name, currency, image_id, user_id, owner_id):\n \"\"\"Update an income table in a database. :params: name - new name for income, currency - currency for income, amount - amount of edited income, image_id - image for income, user_id - id's of user :return: True if success, else False\"\"\"\n <|body_0|>\n\n def update_income_in_db(income_id, name, image_id, mod_time):\n \"\"\"Update an income table in a database. :params: income_id - id of edited income, name - new name for income, amount - amount of edited income, image_id - image for income :return: True if success, else False\"\"\"\n <|body_1|>\n\n def delete_income(income_id):\n \"\"\"Deletes an income field in a database. :params: income_id - id of income. :return: True if success, else False\"\"\"\n <|body_2|>\n\n def get_income_list_by_user_id(user_id):\n \"\"\"Gets a list of incomes by user id. :params: user_id - id of logged user :return: list of incomes\"\"\"\n <|body_3|>\n\n def get_info_income(user_id, income_id):\n \"\"\"Gets a detailed information of incomes for a logged user. :params: user_id - id of logged user, income_id - id of edited income :return: list of incomes\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Income:\n \"\"\"Model for manipulation data regarding Income instance.\"\"\"\n\n def create(name, currency, image_id, user_id, owner_id):\n \"\"\"Update an income table in a database. :params: name - new name for income, currency - currency for income, amount - amount of edited income, image_id - image for income, user_id - id's of user :return: True if success, else False\"\"\"\n create_time = datetime.now().timestamp()\n mod_time = create_time\n query = '\\n INSERT INTO income (name, currency, user_id, create_time, mod_time, image_id, owner_id)\\n VALUES (%s, %s, %s, %s, %s, %s, %s);\\n '\n args = (name, currency, user_id, create_time, mod_time, image_id, owner_id)\n Income._make_transaction(query, args)\n\n def update_income_in_db(income_id, name, image_id, mod_time):\n \"\"\"Update an income table in a database. :params: income_id - id of edited income, name - new name for income, amount - amount of edited income, image_id - image for income :return: True if success, else False\"\"\"\n sql = '\\n UPDATE income\\n SET name=%s, image_id = %s, mod_time = %s\\n WHERE income.id=%s;\\n '\n args = (name, image_id, mod_time, income_id)\n Income._make_transaction(sql, args)\n\n def delete_income(income_id):\n \"\"\"Deletes an income field in a database. :params: income_id - id of income. :return: True if success, else False\"\"\"\n sql = '\\n DELETE FROM income\\n WHERE id=%s;\\n '\n args = (income_id,)\n Income._make_transaction(sql, args)\n\n def get_income_list_by_user_id(user_id):\n \"\"\"Gets a list of incomes by user id. :params: user_id - id of logged user :return: list of incomes\"\"\"\n sql = '\\n SELECT\\n income.id, income.name, cs.currency,\\n income.mod_time, image.css\\n FROM income\\n LEFT JOIN image ON income.image_id = image.id\\n LEFT JOIN currencies cs ON income.currency = cs.id\\n WHERE income.user_id=%s\\n ORDER BY income.name;\\n '\n args = (user_id,)\n query = Income._make_select(sql, args)\n return query\n\n def get_info_income(user_id, income_id):\n \"\"\"Gets a detailed information of incomes for a logged user. :params: user_id - id of logged user, income_id - id of edited income :return: list of incomes\"\"\"\n sql = '\\n SELECT\\n income.id, income.name, currencies.id as currency_id,\\n income.mod_time, image.css, income.image_id, currencies.currency\\n FROM income\\n JOIN image ON income.image_id = image.id\\n JOIN currencies ON income.currency = currencies.id\\n WHERE income.user_id=%s and income.id=%s\\n ORDER BY income.name;\\n '\n args = (user_id, income_id)\n query = Income._make_select(sql, args)\n if query:\n return query[0]\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "moneta/src/python/db/income.py", "source_repo": "lv-386-python/moneta", "split": "test", "star_events_count": 7} {"blob_id": "8c6a886cc58ce14c81a980388fc45dcdc4f0a2fb", "bodies": ["personal = PersonalInfoView(self.driver)\nself.assertEqual(personal.get_title, personal.get_yaml_data('personal_info_data', 'title'))\nself.assertEqual(personal.get_my_photo_text, personal.get_yaml_data('personal_info_data', 'my_photo_text'))\nself.assertEqual(personal.get_my_name_text, personal.get_yaml_data('personal_info_data', 'my_name_text'))\nself.assertEqual(personal.get_my_age_text, personal.get_yaml_data('personal_info_data', 'my_age_text'))\nself.assertEqual(personal.get_experience_text, personal.get_yaml_data('personal_info_data', 'experience_text'))\nself.assertEqual(personal.get_familiar_area_text, personal.get_yaml_data('personal_info_data', 'familiar_area_text'))\nself.assertEqual(personal.get_my_profile_text, personal.get_yaml_data('personal_info_data', 'personal_profile_text'))\nself.assertEqual(personal.get_wechat_text, personal.get_yaml_data('personal_info_data', 'wechat_code_text'))", "personal = PersonalInfoView(self.driver)\npersonal.goto_update_photo()\nself.assertTrue(personal.is_toast_exist('personal', '图片上传成功!'))"], "bodies_text": "<|body_start_0|>\n personal = PersonalInfoView(self.driver)\n self.assertEqual(personal.get_title, personal.get_yaml_data('personal_info_data', 'title'))\n self.assertEqual(personal.get_my_photo_text, personal.get_yaml_data('personal_info_data', 'my_photo_text'))\n self.assertEqual(personal.get_my_name_text, personal.get_yaml_data('personal_info_data', 'my_name_text'))\n self.assertEqual(personal.get_my_age_text, personal.get_yaml_data('personal_info_data', 'my_age_text'))\n self.assertEqual(personal.get_experience_text, personal.get_yaml_data('personal_info_data', 'experience_text'))\n self.assertEqual(personal.get_familiar_area_text, personal.get_yaml_data('personal_info_data', 'familiar_area_text'))\n self.assertEqual(personal.get_my_profile_text, personal.get_yaml_data('personal_info_data', 'personal_profile_text'))\n self.assertEqual(personal.get_wechat_text, personal.get_yaml_data('personal_info_data', 'wechat_code_text'))\n<|end_body_0|>\n\n<|body_start_1|>\n personal = PersonalInfoView(self.driver)\n personal.goto_update_photo()\n self.assertTrue(personal.is_toast_exist('personal', '图片上传成功!'))\n<|end_body_1|>\n", "class_docstring": "个人信息页测试用例", "class_name": "PersonalCase", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PersonalCase:\n \"\"\"个人信息页测试用例\"\"\"\n\n def test_personal_001_correct_text(self):\n \"\"\"用例1:校验个人信息页文本内容是否正确\"\"\"\n <|body_0|>\n\n def test_personal_002_upload_photo(self):\n \"\"\"用例2:测试上传头像是否成功\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n personal = PersonalInfoView(self.driver)\n self.assertEqual(personal.get_title, personal.get_yaml_data('personal_info_data', 'title'))\n self.assertEqual(personal.get_my_photo_text, personal.get_yaml_data('personal_info_data', 'my_photo_text'))\n self.assertEqual(personal.get_my_name_text, personal.get_yaml_data('personal_info_data', 'my_name_text'))\n self.assertEqual(personal.get_my_age_text, personal.get_yaml_data('personal_info_data', 'my_age_text'))\n self.assertEqual(personal.get_experience_text, personal.get_yaml_data('personal_info_data', 'experience_text'))\n self.assertEqual(personal.get_familiar_area_text, personal.get_yaml_data('personal_info_data', 'familiar_area_text'))\n self.assertEqual(personal.get_my_profile_text, personal.get_yaml_data('personal_info_data', 'personal_profile_text'))\n self.assertEqual(personal.get_wechat_text, personal.get_yaml_data('personal_info_data', 'wechat_code_text'))\n<|end_body_0|>\n\n<|body_start_1|>\n personal = PersonalInfoView(self.driver)\n personal.goto_update_photo()\n self.assertTrue(personal.is_toast_exist('personal', '图片上传成功!'))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000336", "length_bytes": 1973, "license_type": "no_license", "methods": [{"docstring": "用例1:校验个人信息页文本内容是否正确", "name": "test_personal_001_correct_text", "signature": "def test_personal_001_correct_text(self)"}, {"docstring": "用例2:测试上传头像是否成功", "name": "test_personal_002_upload_photo", "signature": "def test_personal_002_upload_photo(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034741", "prompt": "Implement the Python class `PersonalCase` described below.\n\nClass description:\n个人信息页测试用例\n\nMethod signatures and docstrings:\n- def test_personal_001_correct_text(self): 用例1:校验个人信息页文本内容是否正确\n- def test_personal_002_upload_photo(self): 用例2:测试上传头像是否成功", "prompted_full_text": "Implement the Python class `PersonalCase` described below.\n\nClass description:\n个人信息页测试用例\n\nMethod signatures and docstrings:\n- def test_personal_001_correct_text(self): 用例1:校验个人信息页文本内容是否正确\n- def test_personal_002_upload_photo(self): 用例2:测试上传头像是否成功\n\n<|skeleton|>\nclass PersonalCase:\n \"\"\"个人信息页测试用例\"\"\"\n\n def test_personal_001_correct_text(self):\n \"\"\"用例1:校验个人信息页文本内容是否正确\"\"\"\n <|body_0|>\n\n def test_personal_002_upload_photo(self):\n \"\"\"用例2:测试上传头像是否成功\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n personal = PersonalInfoView(self.driver)\n self.assertEqual(personal.get_title, personal.get_yaml_data('personal_info_data', 'title'))\n self.assertEqual(personal.get_my_photo_text, personal.get_yaml_data('personal_info_data', 'my_photo_text'))\n self.assertEqual(personal.get_my_name_text, personal.get_yaml_data('personal_info_data', 'my_name_text'))\n self.assertEqual(personal.get_my_age_text, personal.get_yaml_data('personal_info_data', 'my_age_text'))\n self.assertEqual(personal.get_experience_text, personal.get_yaml_data('personal_info_data', 'experience_text'))\n self.assertEqual(personal.get_familiar_area_text, personal.get_yaml_data('personal_info_data', 'familiar_area_text'))\n self.assertEqual(personal.get_my_profile_text, personal.get_yaml_data('personal_info_data', 'personal_profile_text'))\n self.assertEqual(personal.get_wechat_text, personal.get_yaml_data('personal_info_data', 'wechat_code_text'))\n<|end_body_0|>\n\n<|body_start_1|>\n personal = PersonalInfoView(self.driver)\n personal.goto_update_photo()\n self.assertTrue(personal.is_toast_exist('personal', '图片上传成功!'))\n<|end_body_1|>\n", "revision_id": "7c6e14a82f5dd3c6aeb1134bc503681584437c41", "skeleton": "<|skeleton|>\nclass PersonalCase:\n \"\"\"个人信息页测试用例\"\"\"\n\n def test_personal_001_correct_text(self):\n \"\"\"用例1:校验个人信息页文本内容是否正确\"\"\"\n <|body_0|>\n\n def test_personal_002_upload_photo(self):\n \"\"\"用例2:测试上传头像是否成功\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PersonalCase:\n \"\"\"个人信息页测试用例\"\"\"\n\n def test_personal_001_correct_text(self):\n \"\"\"用例1:校验个人信息页文本内容是否正确\"\"\"\n personal = PersonalInfoView(self.driver)\n self.assertEqual(personal.get_title, personal.get_yaml_data('personal_info_data', 'title'))\n self.assertEqual(personal.get_my_photo_text, personal.get_yaml_data('personal_info_data', 'my_photo_text'))\n self.assertEqual(personal.get_my_name_text, personal.get_yaml_data('personal_info_data', 'my_name_text'))\n self.assertEqual(personal.get_my_age_text, personal.get_yaml_data('personal_info_data', 'my_age_text'))\n self.assertEqual(personal.get_experience_text, personal.get_yaml_data('personal_info_data', 'experience_text'))\n self.assertEqual(personal.get_familiar_area_text, personal.get_yaml_data('personal_info_data', 'familiar_area_text'))\n self.assertEqual(personal.get_my_profile_text, personal.get_yaml_data('personal_info_data', 'personal_profile_text'))\n self.assertEqual(personal.get_wechat_text, personal.get_yaml_data('personal_info_data', 'wechat_code_text'))\n\n def test_personal_002_upload_photo(self):\n \"\"\"用例2:测试上传头像是否成功\"\"\"\n personal = PersonalInfoView(self.driver)\n personal.goto_update_photo()\n self.assertTrue(personal.is_toast_exist('personal', '图片上传成功!'))\n", "source": "the_stack_v2_python_sparse", "source_path": "woniujiacc_ui_project/test_case/personal_case.py", "source_repo": "GorkyZH/woniujia_cc_ui", "split": "test", "star_events_count": 2} {"blob_id": "7f88eb36ef4e836ad666927865a346952e6cb7f6", "bodies": ["super().__init__(*args, **kwargs)\nself.resource_type = resource_type\nself.fields['auto_doi'].disabled = True\nif not settings.DATACITE_PREFIX:\n self.initial['auto_doi'] = False\nself.quality_assurance_fields = EditLog.QUALITY_ASSURANCE_FIELDS[resource_type.id]\nrm_fields = set(self.base_fields) - set(self.quality_assurance_fields) - set(EditLog.EDITOR_FIELDS)\nfor f in rm_fields:\n del self.fields[f]\nfor f, lbl in EditLog.LABELS[resource_type.id].items():\n hints = EditLog.HINTS.get(f)\n if hints:\n lbl += '
  • ' + '
  • '.join(hints) + '
'\n self.fields[f].label = lbl\nfor f in self.quality_assurance_fields:\n self.fields[f].required = True", "if self.errors:\n return\nif self.cleaned_data['decision'] == 2:\n for field in self.quality_assurance_fields:\n if not self.cleaned_data[field]:\n raise forms.ValidationError('The quality assurance fields must all pass before you accept the project')", "with transaction.atomic():\n edit_log = super().save(commit=False)\n project = edit_log.project\n now = timezone.now()\n edit_log.decision_datetime = now\n edit_log.save()\n if edit_log.decision == 0:\n project.reject()\n edit_log = EditLog.objects.get(id=edit_log.id)\n elif edit_log.decision == 1:\n project.submission_status = SubmissionStatus.NEEDS_RESUBMISSION\n project.revision_request_datetime = now\n project.latest_reminder = now\n project.save()\n else:\n project.submission_status = SubmissionStatus.NEEDS_COPYEDIT\n project.editor_accept_datetime = now\n project.latest_reminder = now\n CopyeditLog.objects.create(project=project)\n project.save()\n if self.cleaned_data['auto_doi']:\n if not project.doi:\n payload = generate_doi_payload(project, event='draft')\n register_doi(payload, project)\n if not project.core_project.doi:\n payload = generate_doi_payload(project, event='draft', core_project=True)\n register_doi(payload, project.core_project)\n return edit_log"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.resource_type = resource_type\n self.fields['auto_doi'].disabled = True\n if not settings.DATACITE_PREFIX:\n self.initial['auto_doi'] = False\n self.quality_assurance_fields = EditLog.QUALITY_ASSURANCE_FIELDS[resource_type.id]\n rm_fields = set(self.base_fields) - set(self.quality_assurance_fields) - set(EditLog.EDITOR_FIELDS)\n for f in rm_fields:\n del self.fields[f]\n for f, lbl in EditLog.LABELS[resource_type.id].items():\n hints = EditLog.HINTS.get(f)\n if hints:\n lbl += '
  • ' + '
  • '.join(hints) + '
'\n self.fields[f].label = lbl\n for f in self.quality_assurance_fields:\n self.fields[f].required = True\n<|end_body_0|>\n\n<|body_start_1|>\n if self.errors:\n return\n if self.cleaned_data['decision'] == 2:\n for field in self.quality_assurance_fields:\n if not self.cleaned_data[field]:\n raise forms.ValidationError('The quality assurance fields must all pass before you accept the project')\n<|end_body_1|>\n\n<|body_start_2|>\n with transaction.atomic():\n edit_log = super().save(commit=False)\n project = edit_log.project\n now = timezone.now()\n edit_log.decision_datetime = now\n edit_log.save()\n if edit_log.decision == 0:\n project.reject()\n edit_log = EditLog.objects.get(id=edit_log.id)\n elif edit_log.decision == 1:\n project.submission_status = SubmissionStatus.NEEDS_RESUBMISSION\n project.revision_request_datetime = now\n project.latest_reminder = now\n project.save()\n else:\n project.submission_status = SubmissionStatus.NEEDS_COPYEDIT\n project.editor_accept_datetime = now\n project.latest_reminder = now\n CopyeditLog.objects.create(project=project)\n project.save()\n if self.cleaned_data['auto_doi']:\n if not project.doi:\n payload = generate_doi_payload(project, event='draft')\n register_doi(payload, project)\n if not project.core_project.doi:\n payload = generate_doi_payload(project, event='draft', core_project=True)\n register_doi(payload, project.core_project)\n return edit_log\n<|end_body_2|>\n", "class_docstring": "For an editor to make a decision regarding a submission. Fields are specified for each resource type The labels are stored in the model because it requires them to render results without using this form", "class_name": "EditSubmissionForm", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EditSubmissionForm:\n \"\"\"For an editor to make a decision regarding a submission. Fields are specified for each resource type The labels are stored in the model because it requires them to render results without using this form\"\"\"\n\n def __init__(self, resource_type, *args, **kwargs):\n \"\"\"Set the appropriate fields/labels for the given resource type, and make them required. Remove irrelevant fields.\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"May not accept if the quality assurance fields are not all True\"\"\"\n <|body_1|>\n\n def save(self):\n \"\"\"Process the editor decision\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.resource_type = resource_type\n self.fields['auto_doi'].disabled = True\n if not settings.DATACITE_PREFIX:\n self.initial['auto_doi'] = False\n self.quality_assurance_fields = EditLog.QUALITY_ASSURANCE_FIELDS[resource_type.id]\n rm_fields = set(self.base_fields) - set(self.quality_assurance_fields) - set(EditLog.EDITOR_FIELDS)\n for f in rm_fields:\n del self.fields[f]\n for f, lbl in EditLog.LABELS[resource_type.id].items():\n hints = EditLog.HINTS.get(f)\n if hints:\n lbl += '
  • ' + '
  • '.join(hints) + '
'\n self.fields[f].label = lbl\n for f in self.quality_assurance_fields:\n self.fields[f].required = True\n<|end_body_0|>\n\n<|body_start_1|>\n if self.errors:\n return\n if self.cleaned_data['decision'] == 2:\n for field in self.quality_assurance_fields:\n if not self.cleaned_data[field]:\n raise forms.ValidationError('The quality assurance fields must all pass before you accept the project')\n<|end_body_1|>\n\n<|body_start_2|>\n with transaction.atomic():\n edit_log = super().save(commit=False)\n project = edit_log.project\n now = timezone.now()\n edit_log.decision_datetime = now\n edit_log.save()\n if edit_log.decision == 0:\n project.reject()\n edit_log = EditLog.objects.get(id=edit_log.id)\n elif edit_log.decision == 1:\n project.submission_status = SubmissionStatus.NEEDS_RESUBMISSION\n project.revision_request_datetime = now\n project.latest_reminder = now\n project.save()\n else:\n project.submission_status = SubmissionStatus.NEEDS_COPYEDIT\n project.editor_accept_datetime = now\n project.latest_reminder = now\n CopyeditLog.objects.create(project=project)\n project.save()\n if self.cleaned_data['auto_doi']:\n if not project.doi:\n payload = generate_doi_payload(project, event='draft')\n register_doi(payload, project)\n if not project.core_project.doi:\n payload = generate_doi_payload(project, event='draft', core_project=True)\n register_doi(payload, project.core_project)\n return edit_log\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000337", "length_bytes": 32053, "license_type": "permissive", "methods": [{"docstring": "Set the appropriate fields/labels for the given resource type, and make them required. Remove irrelevant fields.", "name": "__init__", "signature": "def __init__(self, resource_type, *args, **kwargs)"}, {"docstring": "May not accept if the quality assurance fields are not all True", "name": "clean", "signature": "def clean(self)"}, {"docstring": "Process the editor decision", "name": "save", "signature": "def save(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_021643", "prompt": "Implement the Python class `EditSubmissionForm` described below.\n\nClass description:\nFor an editor to make a decision regarding a submission. Fields are specified for each resource type The labels are stored in the model because it requires them to render results without using this form\n\nMethod signatures and docstrings:\n- def __init__(self, resource_type, *args, **kwargs): Set the appropriate fields/labels for the given resource type, and make them required. Remove irrelevant fields.\n- def clean(self): May not accept if the quality assurance fields are not all True\n- def save(self): Process the editor decision", "prompted_full_text": "Implement the Python class `EditSubmissionForm` described below.\n\nClass description:\nFor an editor to make a decision regarding a submission. Fields are specified for each resource type The labels are stored in the model because it requires them to render results without using this form\n\nMethod signatures and docstrings:\n- def __init__(self, resource_type, *args, **kwargs): Set the appropriate fields/labels for the given resource type, and make them required. Remove irrelevant fields.\n- def clean(self): May not accept if the quality assurance fields are not all True\n- def save(self): Process the editor decision\n\n<|skeleton|>\nclass EditSubmissionForm:\n \"\"\"For an editor to make a decision regarding a submission. Fields are specified for each resource type The labels are stored in the model because it requires them to render results without using this form\"\"\"\n\n def __init__(self, resource_type, *args, **kwargs):\n \"\"\"Set the appropriate fields/labels for the given resource type, and make them required. Remove irrelevant fields.\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"May not accept if the quality assurance fields are not all True\"\"\"\n <|body_1|>\n\n def save(self):\n \"\"\"Process the editor decision\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self.resource_type = resource_type\n self.fields['auto_doi'].disabled = True\n if not settings.DATACITE_PREFIX:\n self.initial['auto_doi'] = False\n self.quality_assurance_fields = EditLog.QUALITY_ASSURANCE_FIELDS[resource_type.id]\n rm_fields = set(self.base_fields) - set(self.quality_assurance_fields) - set(EditLog.EDITOR_FIELDS)\n for f in rm_fields:\n del self.fields[f]\n for f, lbl in EditLog.LABELS[resource_type.id].items():\n hints = EditLog.HINTS.get(f)\n if hints:\n lbl += '
  • ' + '
  • '.join(hints) + '
'\n self.fields[f].label = lbl\n for f in self.quality_assurance_fields:\n self.fields[f].required = True\n<|end_body_0|>\n\n<|body_start_1|>\n if self.errors:\n return\n if self.cleaned_data['decision'] == 2:\n for field in self.quality_assurance_fields:\n if not self.cleaned_data[field]:\n raise forms.ValidationError('The quality assurance fields must all pass before you accept the project')\n<|end_body_1|>\n\n<|body_start_2|>\n with transaction.atomic():\n edit_log = super().save(commit=False)\n project = edit_log.project\n now = timezone.now()\n edit_log.decision_datetime = now\n edit_log.save()\n if edit_log.decision == 0:\n project.reject()\n edit_log = EditLog.objects.get(id=edit_log.id)\n elif edit_log.decision == 1:\n project.submission_status = SubmissionStatus.NEEDS_RESUBMISSION\n project.revision_request_datetime = now\n project.latest_reminder = now\n project.save()\n else:\n project.submission_status = SubmissionStatus.NEEDS_COPYEDIT\n project.editor_accept_datetime = now\n project.latest_reminder = now\n CopyeditLog.objects.create(project=project)\n project.save()\n if self.cleaned_data['auto_doi']:\n if not project.doi:\n payload = generate_doi_payload(project, event='draft')\n register_doi(payload, project)\n if not project.core_project.doi:\n payload = generate_doi_payload(project, event='draft', core_project=True)\n register_doi(payload, project.core_project)\n return edit_log\n<|end_body_2|>\n", "revision_id": "304e093dc550da8636552dc601d6545c07ffc771", "skeleton": "<|skeleton|>\nclass EditSubmissionForm:\n \"\"\"For an editor to make a decision regarding a submission. Fields are specified for each resource type The labels are stored in the model because it requires them to render results without using this form\"\"\"\n\n def __init__(self, resource_type, *args, **kwargs):\n \"\"\"Set the appropriate fields/labels for the given resource type, and make them required. Remove irrelevant fields.\"\"\"\n <|body_0|>\n\n def clean(self):\n \"\"\"May not accept if the quality assurance fields are not all True\"\"\"\n <|body_1|>\n\n def save(self):\n \"\"\"Process the editor decision\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EditSubmissionForm:\n \"\"\"For an editor to make a decision regarding a submission. Fields are specified for each resource type The labels are stored in the model because it requires them to render results without using this form\"\"\"\n\n def __init__(self, resource_type, *args, **kwargs):\n \"\"\"Set the appropriate fields/labels for the given resource type, and make them required. Remove irrelevant fields.\"\"\"\n super().__init__(*args, **kwargs)\n self.resource_type = resource_type\n self.fields['auto_doi'].disabled = True\n if not settings.DATACITE_PREFIX:\n self.initial['auto_doi'] = False\n self.quality_assurance_fields = EditLog.QUALITY_ASSURANCE_FIELDS[resource_type.id]\n rm_fields = set(self.base_fields) - set(self.quality_assurance_fields) - set(EditLog.EDITOR_FIELDS)\n for f in rm_fields:\n del self.fields[f]\n for f, lbl in EditLog.LABELS[resource_type.id].items():\n hints = EditLog.HINTS.get(f)\n if hints:\n lbl += '
  • ' + '
  • '.join(hints) + '
'\n self.fields[f].label = lbl\n for f in self.quality_assurance_fields:\n self.fields[f].required = True\n\n def clean(self):\n \"\"\"May not accept if the quality assurance fields are not all True\"\"\"\n if self.errors:\n return\n if self.cleaned_data['decision'] == 2:\n for field in self.quality_assurance_fields:\n if not self.cleaned_data[field]:\n raise forms.ValidationError('The quality assurance fields must all pass before you accept the project')\n\n def save(self):\n \"\"\"Process the editor decision\"\"\"\n with transaction.atomic():\n edit_log = super().save(commit=False)\n project = edit_log.project\n now = timezone.now()\n edit_log.decision_datetime = now\n edit_log.save()\n if edit_log.decision == 0:\n project.reject()\n edit_log = EditLog.objects.get(id=edit_log.id)\n elif edit_log.decision == 1:\n project.submission_status = SubmissionStatus.NEEDS_RESUBMISSION\n project.revision_request_datetime = now\n project.latest_reminder = now\n project.save()\n else:\n project.submission_status = SubmissionStatus.NEEDS_COPYEDIT\n project.editor_accept_datetime = now\n project.latest_reminder = now\n CopyeditLog.objects.create(project=project)\n project.save()\n if self.cleaned_data['auto_doi']:\n if not project.doi:\n payload = generate_doi_payload(project, event='draft')\n register_doi(payload, project)\n if not project.core_project.doi:\n payload = generate_doi_payload(project, event='draft', core_project=True)\n register_doi(payload, project.core_project)\n return edit_log\n", "source": "the_stack_v2_python_sparse", "source_path": "physionet-django/console/forms.py", "source_repo": "MIT-LCP/physionet-build", "split": "test", "star_events_count": 50} {"blob_id": "6aafaee56cdc2e8d026c60b19e97cedf5ba2a44e", "bodies": ["limit = request.args.get('limit')\nstart, end = validators.validate_date_range()\nreturn tracks_import.TracksImport.get_imports_per_date(start, end, end_id=limit)", "data = request.json or {}\naccount_id = data.get('account_id', '')\nstart_date = data.get('start_date')\nend_date = data.get('end_date')\nradio_name = data.get('radio_name')\nreturn radio.Radio.update_radio_tracks_per_range(radio_name=radio_name, start_date=start_date, end_date=end_date, account_id=account_id)"], "bodies_text": "<|body_start_0|>\n limit = request.args.get('limit')\n start, end = validators.validate_date_range()\n return tracks_import.TracksImport.get_imports_per_date(start, end, end_id=limit)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.json or {}\n account_id = data.get('account_id', '')\n start_date = data.get('start_date')\n end_date = data.get('end_date')\n radio_name = data.get('radio_name')\n return radio.Radio.update_radio_tracks_per_range(radio_name=radio_name, start_date=start_date, end_date=end_date, account_id=account_id)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DBImports4Date", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DBImports4Date:\n\n def get(self):\n \"\"\"Imports per date range\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Import tracks for radio per date range\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n limit = request.args.get('limit')\n start, end = validators.validate_date_range()\n return tracks_import.TracksImport.get_imports_per_date(start, end, end_id=limit)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.json or {}\n account_id = data.get('account_id', '')\n start_date = data.get('start_date')\n end_date = data.get('end_date')\n radio_name = data.get('radio_name')\n return radio.Radio.update_radio_tracks_per_range(radio_name=radio_name, start_date=start_date, end_date=end_date, account_id=account_id)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000338", "length_bytes": 4433, "license_type": "no_license", "methods": [{"docstring": "Imports per date range", "name": "get", "signature": "def get(self)"}, {"docstring": "Import tracks for radio per date range", "name": "post", "signature": "def post(self)"}], "n_methods": 2, "prompt": "Implement the Python class `DBImports4Date` described below.\n\nClass description:\nImplement the DBImports4Date class.\n\nMethod signatures and docstrings:\n- def get(self): Imports per date range\n- def post(self): Import tracks for radio per date range", "prompted_full_text": "Implement the Python class `DBImports4Date` described below.\n\nClass description:\nImplement the DBImports4Date class.\n\nMethod signatures and docstrings:\n- def get(self): Imports per date range\n- def post(self): Import tracks for radio per date range\n\n<|skeleton|>\nclass DBImports4Date:\n\n def get(self):\n \"\"\"Imports per date range\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Import tracks for radio per date range\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n limit = request.args.get('limit')\n start, end = validators.validate_date_range()\n return tracks_import.TracksImport.get_imports_per_date(start, end, end_id=limit)\n<|end_body_0|>\n\n<|body_start_1|>\n data = request.json or {}\n account_id = data.get('account_id', '')\n start_date = data.get('start_date')\n end_date = data.get('end_date')\n radio_name = data.get('radio_name')\n return radio.Radio.update_radio_tracks_per_range(radio_name=radio_name, start_date=start_date, end_date=end_date, account_id=account_id)\n<|end_body_1|>\n", "revision_id": "be7876fac78cccdd8722f35ab9b0cfb1cbbebe34", "skeleton": "<|skeleton|>\nclass DBImports4Date:\n\n def get(self):\n \"\"\"Imports per date range\"\"\"\n <|body_0|>\n\n def post(self):\n \"\"\"Import tracks for radio per date range\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DBImports4Date:\n def get(self):\n \"\"\"Imports per date range\"\"\"\n limit = request.args.get('limit')\n start, end = validators.validate_date_range()\n return tracks_import.TracksImport.get_imports_per_date(start, end, end_id=limit)\n\n def post(self):\n \"\"\"Import tracks for radio per date range\"\"\"\n data = request.json or {}\n account_id = data.get('account_id', '')\n start_date = data.get('start_date')\n end_date = data.get('end_date')\n radio_name = data.get('radio_name')\n return radio.Radio.update_radio_tracks_per_range(radio_name=radio_name, start_date=start_date, end_date=end_date, account_id=account_id)\n", "source": "the_stack_v2_python_sparse", "source_path": "application/apis/dbimports_api.py", "source_repo": "markifresh/OnlineRadio", "split": "test", "star_events_count": 0} {"blob_id": "acd5a69c53a2919d13368cac254a20a2fdc1d352", "bodies": ["basic_data = super(Endor, self).GetMetadata()\nbasic_data['edw_service_type'] = 'endor'\nbasic_data.update(self.client_interface.GetMetadata())\nbasic_data.update(self.GetDataDetails())\nreturn basic_data", "data_details = {}\ndataset_id = re.split('\\\\.', self.cluster_identifier)[1]\nparsed_id = re.split('_', dataset_id)\ndata_details['format'] = parsed_id[1]\ndata_details['compression'] = parsed_id[2]\ndata_details['partitioning'] = parsed_id[3]\ndata_details['location'] = parsed_id[4]\nreturn data_details"], "bodies_text": "<|body_start_0|>\n basic_data = super(Endor, self).GetMetadata()\n basic_data['edw_service_type'] = 'endor'\n basic_data.update(self.client_interface.GetMetadata())\n basic_data.update(self.GetDataDetails())\n return basic_data\n<|end_body_0|>\n\n<|body_start_1|>\n data_details = {}\n dataset_id = re.split('\\\\.', self.cluster_identifier)[1]\n parsed_id = re.split('_', dataset_id)\n data_details['format'] = parsed_id[1]\n data_details['compression'] = parsed_id[2]\n data_details['partitioning'] = parsed_id[3]\n data_details['location'] = parsed_id[4]\n return data_details\n<|end_body_1|>\n", "class_docstring": "Class representing BigQuery Endor service.", "class_name": "Endor", "detected_licenses": ["Classpath-exception-2.0", "BSD-3-Clause", "AGPL-3.0-only", "MIT", "GPL-2.0-only", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Endor:\n \"\"\"Class representing BigQuery Endor service.\"\"\"\n\n def GetMetadata(self) -> Dict[str, str]:\n \"\"\"Return a dictionary of the metadata for the BigQuery Endor service. Returns: A dictionary set to Endor service details.\"\"\"\n <|body_0|>\n\n def GetDataDetails(self) -> Dict[str, str]:\n \"\"\"Returns a dictionary with underlying data details. cluster_identifier = . Data details are extracted from the dataset_id that follows the format: ____ eg. tpch100_parquet_uncompressed_unpartitoned_s3 Returns: A dictionary set to underlying data's details (format, etc.)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n basic_data = super(Endor, self).GetMetadata()\n basic_data['edw_service_type'] = 'endor'\n basic_data.update(self.client_interface.GetMetadata())\n basic_data.update(self.GetDataDetails())\n return basic_data\n<|end_body_0|>\n\n<|body_start_1|>\n data_details = {}\n dataset_id = re.split('\\\\.', self.cluster_identifier)[1]\n parsed_id = re.split('_', dataset_id)\n data_details['format'] = parsed_id[1]\n data_details['compression'] = parsed_id[2]\n data_details['partitioning'] = parsed_id[3]\n data_details['location'] = parsed_id[4]\n return data_details\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000339", "length_bytes": 24185, "license_type": "permissive", "methods": [{"docstring": "Return a dictionary of the metadata for the BigQuery Endor service. Returns: A dictionary set to Endor service details.", "name": "GetMetadata", "signature": "def GetMetadata(self) -> Dict[str, str]"}, {"docstring": "Returns a dictionary with underlying data details. cluster_identifier = . Data details are extracted from the dataset_id that follows the format: ____ eg. tpch100_parquet_uncompressed_unpartitoned_s3 Returns: A dictionary set to underlying data's details (format, etc.)", "name": "GetDataDetails", "signature": "def GetDataDetails(self) -> Dict[str, str]"}], "n_methods": 2, "prompt": "Implement the Python class `Endor` described below.\n\nClass description:\nClass representing BigQuery Endor service.\n\nMethod signatures and docstrings:\n- def GetMetadata(self) -> Dict[str, str]: Return a dictionary of the metadata for the BigQuery Endor service. Returns: A dictionary set to Endor service details.\n- def GetDataDetails(self) -> Dict[str, str]: Returns a dictionary with underlying data details. cluster_identifier = . Data details are extracted from the dataset_id that follows the format: ____ eg. tpch100_parquet_uncompressed_unpartitoned_s3 Returns: A dictionary set to underlying data's details (format, etc.)", "prompted_full_text": "Implement the Python class `Endor` described below.\n\nClass description:\nClass representing BigQuery Endor service.\n\nMethod signatures and docstrings:\n- def GetMetadata(self) -> Dict[str, str]: Return a dictionary of the metadata for the BigQuery Endor service. Returns: A dictionary set to Endor service details.\n- def GetDataDetails(self) -> Dict[str, str]: Returns a dictionary with underlying data details. cluster_identifier = . Data details are extracted from the dataset_id that follows the format: ____ eg. tpch100_parquet_uncompressed_unpartitoned_s3 Returns: A dictionary set to underlying data's details (format, etc.)\n\n<|skeleton|>\nclass Endor:\n \"\"\"Class representing BigQuery Endor service.\"\"\"\n\n def GetMetadata(self) -> Dict[str, str]:\n \"\"\"Return a dictionary of the metadata for the BigQuery Endor service. Returns: A dictionary set to Endor service details.\"\"\"\n <|body_0|>\n\n def GetDataDetails(self) -> Dict[str, str]:\n \"\"\"Returns a dictionary with underlying data details. cluster_identifier = . Data details are extracted from the dataset_id that follows the format: ____ eg. tpch100_parquet_uncompressed_unpartitoned_s3 Returns: A dictionary set to underlying data's details (format, etc.)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n basic_data = super(Endor, self).GetMetadata()\n basic_data['edw_service_type'] = 'endor'\n basic_data.update(self.client_interface.GetMetadata())\n basic_data.update(self.GetDataDetails())\n return basic_data\n<|end_body_0|>\n\n<|body_start_1|>\n data_details = {}\n dataset_id = re.split('\\\\.', self.cluster_identifier)[1]\n parsed_id = re.split('_', dataset_id)\n data_details['format'] = parsed_id[1]\n data_details['compression'] = parsed_id[2]\n data_details['partitioning'] = parsed_id[3]\n data_details['location'] = parsed_id[4]\n return data_details\n<|end_body_1|>\n", "revision_id": "d0699f32998898757b036704fba39e5471641f01", "skeleton": "<|skeleton|>\nclass Endor:\n \"\"\"Class representing BigQuery Endor service.\"\"\"\n\n def GetMetadata(self) -> Dict[str, str]:\n \"\"\"Return a dictionary of the metadata for the BigQuery Endor service. Returns: A dictionary set to Endor service details.\"\"\"\n <|body_0|>\n\n def GetDataDetails(self) -> Dict[str, str]:\n \"\"\"Returns a dictionary with underlying data details. cluster_identifier = . Data details are extracted from the dataset_id that follows the format: ____ eg. tpch100_parquet_uncompressed_unpartitoned_s3 Returns: A dictionary set to underlying data's details (format, etc.)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Endor:\n \"\"\"Class representing BigQuery Endor service.\"\"\"\n\n def GetMetadata(self) -> Dict[str, str]:\n \"\"\"Return a dictionary of the metadata for the BigQuery Endor service. Returns: A dictionary set to Endor service details.\"\"\"\n basic_data = super(Endor, self).GetMetadata()\n basic_data['edw_service_type'] = 'endor'\n basic_data.update(self.client_interface.GetMetadata())\n basic_data.update(self.GetDataDetails())\n return basic_data\n\n def GetDataDetails(self) -> Dict[str, str]:\n \"\"\"Returns a dictionary with underlying data details. cluster_identifier = . Data details are extracted from the dataset_id that follows the format: ____ eg. tpch100_parquet_uncompressed_unpartitoned_s3 Returns: A dictionary set to underlying data's details (format, etc.)\"\"\"\n data_details = {}\n dataset_id = re.split('\\\\.', self.cluster_identifier)[1]\n parsed_id = re.split('_', dataset_id)\n data_details['format'] = parsed_id[1]\n data_details['compression'] = parsed_id[2]\n data_details['partitioning'] = parsed_id[3]\n data_details['location'] = parsed_id[4]\n return data_details\n", "source": "the_stack_v2_python_sparse", "source_path": "perfkitbenchmarker/providers/gcp/bigquery.py", "source_repo": "GoogleCloudPlatform/PerfKitBenchmarker", "split": "test", "star_events_count": 1923} {"blob_id": "96647a534284a3a0c57ba511cb75f8bb8cd45ddb", "bodies": ["session = db_apis.get_session()\nwith session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\nif loadbalancer:\n self.amphora_driver.update(loadbalancer)\nelse:\n LOG.error('Load balancer %s for listeners update not found. Skipping update.', loadbalancer_id)", "LOG.warning('Reverting listeners updates.')\nsession = db_apis.get_session()\nwith session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\nfor listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)"], "bodies_text": "<|body_start_0|>\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n if loadbalancer:\n self.amphora_driver.update(loadbalancer)\n else:\n LOG.error('Load balancer %s for listeners update not found. Skipping update.', loadbalancer_id)\n<|end_body_0|>\n\n<|body_start_1|>\n LOG.warning('Reverting listeners updates.')\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n for listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n<|end_body_1|>\n", "class_docstring": "Task to update amphora with all specified listeners' configurations.", "class_name": "ListenersUpdate", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer_id):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, loadbalancer_id, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n if loadbalancer:\n self.amphora_driver.update(loadbalancer)\n else:\n LOG.error('Load balancer %s for listeners update not found. Skipping update.', loadbalancer_id)\n<|end_body_0|>\n\n<|body_start_1|>\n LOG.warning('Reverting listeners updates.')\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n for listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000340", "length_bytes": 28773, "license_type": "permissive", "methods": [{"docstring": "Execute updates per listener for an amphora.", "name": "execute", "signature": "def execute(self, loadbalancer_id)"}, {"docstring": "Handle failed listeners updates.", "name": "revert", "signature": "def revert(self, loadbalancer_id, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_016828", "prompt": "Implement the Python class `ListenersUpdate` described below.\n\nClass description:\nTask to update amphora with all specified listeners' configurations.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer_id): Execute updates per listener for an amphora.\n- def revert(self, loadbalancer_id, *args, **kwargs): Handle failed listeners updates.", "prompted_full_text": "Implement the Python class `ListenersUpdate` described below.\n\nClass description:\nTask to update amphora with all specified listeners' configurations.\n\nMethod signatures and docstrings:\n- def execute(self, loadbalancer_id): Execute updates per listener for an amphora.\n- def revert(self, loadbalancer_id, *args, **kwargs): Handle failed listeners updates.\n\n<|skeleton|>\nclass ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer_id):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, loadbalancer_id, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n if loadbalancer:\n self.amphora_driver.update(loadbalancer)\n else:\n LOG.error('Load balancer %s for listeners update not found. Skipping update.', loadbalancer_id)\n<|end_body_0|>\n\n<|body_start_1|>\n LOG.warning('Reverting listeners updates.')\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n for listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n<|end_body_1|>\n", "revision_id": "0426285a41464a5015494584f109eed35a0d44db", "skeleton": "<|skeleton|>\nclass ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer_id):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n <|body_0|>\n\n def revert(self, loadbalancer_id, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ListenersUpdate:\n \"\"\"Task to update amphora with all specified listeners' configurations.\"\"\"\n\n def execute(self, loadbalancer_id):\n \"\"\"Execute updates per listener for an amphora.\"\"\"\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n if loadbalancer:\n self.amphora_driver.update(loadbalancer)\n else:\n LOG.error('Load balancer %s for listeners update not found. Skipping update.', loadbalancer_id)\n\n def revert(self, loadbalancer_id, *args, **kwargs):\n \"\"\"Handle failed listeners updates.\"\"\"\n LOG.warning('Reverting listeners updates.')\n session = db_apis.get_session()\n with session.begin():\n loadbalancer = self.loadbalancer_repo.get(session, id=loadbalancer_id)\n for listener in loadbalancer.listeners:\n self.task_utils.mark_listener_prov_status_error(listener.id)\n", "source": "the_stack_v2_python_sparse", "source_path": "octavia/controller/worker/v2/tasks/amphora_driver_tasks.py", "source_repo": "openstack/octavia", "split": "test", "star_events_count": 147} {"blob_id": "de4ed25e0e555321242cbce6ad4daf234edc3f4d", "bodies": ["existing_entities = existing_entities or {}\nnew_entities: EntityMapping = {}\nself._merge(session, entities, new_entities=new_entities, existing_entities=existing_entities)\nreturn list({**existing_entities, **new_entities}.values())", "processed_entities = []\nfor entity in entities:\n key = entity.entity_key\n existing_entity = existing_entities.get(key, new_entities.get(key))\n entity = self._sync_parent(session, entity, new_entities, existing_entities)\n if existing_entity:\n existing_entity = self._merge_columns(entity, existing_entity)\n self._append_children(existing_entity, *self._merge(session, entity.children, new_entities, existing_entities))\n entity = existing_entity\n else:\n new_entities[key] = entity\n processed_entities.append(entity)\nreturn processed_entities", "parent = get_parent(session, entity)\nif not parent:\n return entity\nexisting_parent = existing_entities.get(parent.entity_key, new_entities.get(parent.entity_key))\nif not existing_parent:\n return entity\nexisting_entity = next(iter((child for child in existing_parent.children if child.entity_key == entity.entity_key)), None)\nif not existing_entity:\n entity.parent = None\nelse:\n entity = cls._merge_columns(entity, existing_entity, include_children=True)\ncls._append_children(existing_parent, entity)\ncls._sync_parent(session, existing_parent, new_entities, existing_entities)\nreturn entity", "entity.children = list({**{e.entity_key: e for e in entity.children}, **{e.entity_key: e for e in children}}.values())\nfor child in children:\n child.parent = entity\n if entity.id:\n child.parent_id = entity.id", "columns = [col.key for col in entity.get_columns()]\nfor col in columns:\n if col == 'meta':\n existing_entity.meta = {**(existing_entity.meta or {}), **(entity.meta or {})}\n elif col not in ('id', 'created_at'):\n try:\n setattr(existing_entity, col, getattr(entity, col))\n except ObjectDeletedError as e:\n logger.warning('Could not set %s on entity <%s>: %s', col, existing_entity.entity_key, e)\nif include_children:\n existing_children = {e.entity_key: e for e in existing_entity.children}\n new_children = {e.entity_key: e for e in entity.children}\n updated_children = {}\n for key, child in new_children.items():\n existing_child = existing_children.get(key)\n updated_children[key] = cls._merge_columns(child, existing_child, include_children=True) if existing_child else child\n cls._append_children(existing_entity, *updated_children.values())\nreturn existing_entity"], "bodies_text": "<|body_start_0|>\n existing_entities = existing_entities or {}\n new_entities: EntityMapping = {}\n self._merge(session, entities, new_entities=new_entities, existing_entities=existing_entities)\n return list({**existing_entities, **new_entities}.values())\n<|end_body_0|>\n\n<|body_start_1|>\n processed_entities = []\n for entity in entities:\n key = entity.entity_key\n existing_entity = existing_entities.get(key, new_entities.get(key))\n entity = self._sync_parent(session, entity, new_entities, existing_entities)\n if existing_entity:\n existing_entity = self._merge_columns(entity, existing_entity)\n self._append_children(existing_entity, *self._merge(session, entity.children, new_entities, existing_entities))\n entity = existing_entity\n else:\n new_entities[key] = entity\n processed_entities.append(entity)\n return processed_entities\n<|end_body_1|>\n\n<|body_start_2|>\n parent = get_parent(session, entity)\n if not parent:\n return entity\n existing_parent = existing_entities.get(parent.entity_key, new_entities.get(parent.entity_key))\n if not existing_parent:\n return entity\n existing_entity = next(iter((child for child in existing_parent.children if child.entity_key == entity.entity_key)), None)\n if not existing_entity:\n entity.parent = None\n else:\n entity = cls._merge_columns(entity, existing_entity, include_children=True)\n cls._append_children(existing_parent, entity)\n cls._sync_parent(session, existing_parent, new_entities, existing_entities)\n return entity\n<|end_body_2|>\n\n<|body_start_3|>\n entity.children = list({**{e.entity_key: e for e in entity.children}, **{e.entity_key: e for e in children}}.values())\n for child in children:\n child.parent = entity\n if entity.id:\n child.parent_id = entity.id\n<|end_body_3|>\n\n<|body_start_4|>\n columns = [col.key for col in entity.get_columns()]\n for col in columns:\n if col == 'meta':\n existing_entity.meta = {**(existing_entity.meta or {}), **(entity.meta or {})}\n elif col not in ('id', 'created_at'):\n try:\n setattr(existing_entity, col, getattr(entity, col))\n except ObjectDeletedError as e:\n logger.warning('Could not set %s on entity <%s>: %s', col, existing_entity.entity_key, e)\n if include_children:\n existing_children = {e.entity_key: e for e in existing_entity.children}\n new_children = {e.entity_key: e for e in entity.children}\n updated_children = {}\n for key, child in new_children.items():\n existing_child = existing_children.get(key)\n updated_children[key] = cls._merge_columns(child, existing_child, include_children=True) if existing_child else child\n cls._append_children(existing_entity, *updated_children.values())\n return existing_entity\n<|end_body_4|>\n", "class_docstring": "A stateless functor in charge of detecting and merging entities that already exist on the database before flushing the session.", "class_name": "EntitiesMerger", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EntitiesMerger:\n \"\"\"A stateless functor in charge of detecting and merging entities that already exist on the database before flushing the session.\"\"\"\n\n def __call__(self, session: Session, entities: Iterable[Entity], existing_entities: Optional[EntityMapping]=None) -> List[Entity]:\n \"\"\"Merge a set of entities with their existing representations and update the parent/child relationships and return a list containing ``[*updated_entities, *new_entities]``.\"\"\"\n <|body_0|>\n\n def _merge(self, session: Session, entities: Iterable[Entity], new_entities: EntityMapping, existing_entities: EntityMapping) -> List[Entity]:\n \"\"\"(Recursive) inner implementation of the entity merge logic.\"\"\"\n <|body_1|>\n\n def _sync_parent(cls, session: Session, entity: Entity, new_entities: EntityMapping, existing_entities: EntityMapping) -> Entity:\n \"\"\"Recursively refresh the parent of an entity all the way up in the hierarchy, to make sure that all the parent/child relations are appropriately rewired and that all the relevant objects are added to this session.\"\"\"\n <|body_2|>\n\n def _append_children(entity: Entity, *children: Entity):\n \"\"\"Update the list of children of a given entity with the given list of entities. Note that, in case of ``entity_key`` conflict (the key of a new entity already exists in the entity's children), the most recent version will be used, so any column merge logic needs to happen before this method is called.\"\"\"\n <|body_3|>\n\n def _merge_columns(cls, entity: Entity, existing_entity: Entity, include_children: bool=False) -> Entity:\n \"\"\"Merge two versions of an entity column by column.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n existing_entities = existing_entities or {}\n new_entities: EntityMapping = {}\n self._merge(session, entities, new_entities=new_entities, existing_entities=existing_entities)\n return list({**existing_entities, **new_entities}.values())\n<|end_body_0|>\n\n<|body_start_1|>\n processed_entities = []\n for entity in entities:\n key = entity.entity_key\n existing_entity = existing_entities.get(key, new_entities.get(key))\n entity = self._sync_parent(session, entity, new_entities, existing_entities)\n if existing_entity:\n existing_entity = self._merge_columns(entity, existing_entity)\n self._append_children(existing_entity, *self._merge(session, entity.children, new_entities, existing_entities))\n entity = existing_entity\n else:\n new_entities[key] = entity\n processed_entities.append(entity)\n return processed_entities\n<|end_body_1|>\n\n<|body_start_2|>\n parent = get_parent(session, entity)\n if not parent:\n return entity\n existing_parent = existing_entities.get(parent.entity_key, new_entities.get(parent.entity_key))\n if not existing_parent:\n return entity\n existing_entity = next(iter((child for child in existing_parent.children if child.entity_key == entity.entity_key)), None)\n if not existing_entity:\n entity.parent = None\n else:\n entity = cls._merge_columns(entity, existing_entity, include_children=True)\n cls._append_children(existing_parent, entity)\n cls._sync_parent(session, existing_parent, new_entities, existing_entities)\n return entity\n<|end_body_2|>\n\n<|body_start_3|>\n entity.children = list({**{e.entity_key: e for e in entity.children}, **{e.entity_key: e for e in children}}.values())\n for child in children:\n child.parent = entity\n if entity.id:\n child.parent_id = entity.id\n<|end_body_3|>\n\n<|body_start_4|>\n columns = [col.key for col in entity.get_columns()]\n for col in columns:\n if col == 'meta':\n existing_entity.meta = {**(existing_entity.meta or {}), **(entity.meta or {})}\n elif col not in ('id', 'created_at'):\n try:\n setattr(existing_entity, col, getattr(entity, col))\n except ObjectDeletedError as e:\n logger.warning('Could not set %s on entity <%s>: %s', col, existing_entity.entity_key, e)\n if include_children:\n existing_children = {e.entity_key: e for e in existing_entity.children}\n new_children = {e.entity_key: e for e in entity.children}\n updated_children = {}\n for key, child in new_children.items():\n existing_child = existing_children.get(key)\n updated_children[key] = cls._merge_columns(child, existing_child, include_children=True) if existing_child else child\n cls._append_children(existing_entity, *updated_children.values())\n return existing_entity\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000341", "length_bytes": 7489, "license_type": "permissive", "methods": [{"docstring": "Merge a set of entities with their existing representations and update the parent/child relationships and return a list containing ``[*updated_entities, *new_entities]``.", "name": "__call__", "signature": "def __call__(self, session: Session, entities: Iterable[Entity], existing_entities: Optional[EntityMapping]=None) -> List[Entity]"}, {"docstring": "(Recursive) inner implementation of the entity merge logic.", "name": "_merge", "signature": "def _merge(self, session: Session, entities: Iterable[Entity], new_entities: EntityMapping, existing_entities: EntityMapping) -> List[Entity]"}, {"docstring": "Recursively refresh the parent of an entity all the way up in the hierarchy, to make sure that all the parent/child relations are appropriately rewired and that all the relevant objects are added to this session.", "name": "_sync_parent", "signature": "def _sync_parent(cls, session: Session, entity: Entity, new_entities: EntityMapping, existing_entities: EntityMapping) -> Entity"}, {"docstring": "Update the list of children of a given entity with the given list of entities. Note that, in case of ``entity_key`` conflict (the key of a new entity already exists in the entity's children), the most recent version will be used, so any column merge logic needs to happen before this method is called.", "name": "_append_children", "signature": "def _append_children(entity: Entity, *children: Entity)"}, {"docstring": "Merge two versions of an entity column by column.", "name": "_merge_columns", "signature": "def _merge_columns(cls, entity: Entity, existing_entity: Entity, include_children: bool=False) -> Entity"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_016483", "prompt": "Implement the Python class `EntitiesMerger` described below.\n\nClass description:\nA stateless functor in charge of detecting and merging entities that already exist on the database before flushing the session.\n\nMethod signatures and docstrings:\n- def __call__(self, session: Session, entities: Iterable[Entity], existing_entities: Optional[EntityMapping]=None) -> List[Entity]: Merge a set of entities with their existing representations and update the parent/child relationships and return a list containing ``[*updated_entities, *new_entities]``.\n- def _merge(self, session: Session, entities: Iterable[Entity], new_entities: EntityMapping, existing_entities: EntityMapping) -> List[Entity]: (Recursive) inner implementation of the entity merge logic.\n- def _sync_parent(cls, session: Session, entity: Entity, new_entities: EntityMapping, existing_entities: EntityMapping) -> Entity: Recursively refresh the parent of an entity all the way up in the hierarchy, to make sure that all the parent/child relations are appropriately rewired and that all the relevant objects are added to this session.\n- def _append_children(entity: Entity, *children: Entity): Update the list of children of a given entity with the given list of entities. Note that, in case of ``entity_key`` conflict (the key of a new entity already exists in the entity's children), the most recent version will be used, so any column merge logic needs to happen before this method is called.\n- def _merge_columns(cls, entity: Entity, existing_entity: Entity, include_children: bool=False) -> Entity: Merge two versions of an entity column by column.", "prompted_full_text": "Implement the Python class `EntitiesMerger` described below.\n\nClass description:\nA stateless functor in charge of detecting and merging entities that already exist on the database before flushing the session.\n\nMethod signatures and docstrings:\n- def __call__(self, session: Session, entities: Iterable[Entity], existing_entities: Optional[EntityMapping]=None) -> List[Entity]: Merge a set of entities with their existing representations and update the parent/child relationships and return a list containing ``[*updated_entities, *new_entities]``.\n- def _merge(self, session: Session, entities: Iterable[Entity], new_entities: EntityMapping, existing_entities: EntityMapping) -> List[Entity]: (Recursive) inner implementation of the entity merge logic.\n- def _sync_parent(cls, session: Session, entity: Entity, new_entities: EntityMapping, existing_entities: EntityMapping) -> Entity: Recursively refresh the parent of an entity all the way up in the hierarchy, to make sure that all the parent/child relations are appropriately rewired and that all the relevant objects are added to this session.\n- def _append_children(entity: Entity, *children: Entity): Update the list of children of a given entity with the given list of entities. Note that, in case of ``entity_key`` conflict (the key of a new entity already exists in the entity's children), the most recent version will be used, so any column merge logic needs to happen before this method is called.\n- def _merge_columns(cls, entity: Entity, existing_entity: Entity, include_children: bool=False) -> Entity: Merge two versions of an entity column by column.\n\n<|skeleton|>\nclass EntitiesMerger:\n \"\"\"A stateless functor in charge of detecting and merging entities that already exist on the database before flushing the session.\"\"\"\n\n def __call__(self, session: Session, entities: Iterable[Entity], existing_entities: Optional[EntityMapping]=None) -> List[Entity]:\n \"\"\"Merge a set of entities with their existing representations and update the parent/child relationships and return a list containing ``[*updated_entities, *new_entities]``.\"\"\"\n <|body_0|>\n\n def _merge(self, session: Session, entities: Iterable[Entity], new_entities: EntityMapping, existing_entities: EntityMapping) -> List[Entity]:\n \"\"\"(Recursive) inner implementation of the entity merge logic.\"\"\"\n <|body_1|>\n\n def _sync_parent(cls, session: Session, entity: Entity, new_entities: EntityMapping, existing_entities: EntityMapping) -> Entity:\n \"\"\"Recursively refresh the parent of an entity all the way up in the hierarchy, to make sure that all the parent/child relations are appropriately rewired and that all the relevant objects are added to this session.\"\"\"\n <|body_2|>\n\n def _append_children(entity: Entity, *children: Entity):\n \"\"\"Update the list of children of a given entity with the given list of entities. Note that, in case of ``entity_key`` conflict (the key of a new entity already exists in the entity's children), the most recent version will be used, so any column merge logic needs to happen before this method is called.\"\"\"\n <|body_3|>\n\n def _merge_columns(cls, entity: Entity, existing_entity: Entity, include_children: bool=False) -> Entity:\n \"\"\"Merge two versions of an entity column by column.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n existing_entities = existing_entities or {}\n new_entities: EntityMapping = {}\n self._merge(session, entities, new_entities=new_entities, existing_entities=existing_entities)\n return list({**existing_entities, **new_entities}.values())\n<|end_body_0|>\n\n<|body_start_1|>\n processed_entities = []\n for entity in entities:\n key = entity.entity_key\n existing_entity = existing_entities.get(key, new_entities.get(key))\n entity = self._sync_parent(session, entity, new_entities, existing_entities)\n if existing_entity:\n existing_entity = self._merge_columns(entity, existing_entity)\n self._append_children(existing_entity, *self._merge(session, entity.children, new_entities, existing_entities))\n entity = existing_entity\n else:\n new_entities[key] = entity\n processed_entities.append(entity)\n return processed_entities\n<|end_body_1|>\n\n<|body_start_2|>\n parent = get_parent(session, entity)\n if not parent:\n return entity\n existing_parent = existing_entities.get(parent.entity_key, new_entities.get(parent.entity_key))\n if not existing_parent:\n return entity\n existing_entity = next(iter((child for child in existing_parent.children if child.entity_key == entity.entity_key)), None)\n if not existing_entity:\n entity.parent = None\n else:\n entity = cls._merge_columns(entity, existing_entity, include_children=True)\n cls._append_children(existing_parent, entity)\n cls._sync_parent(session, existing_parent, new_entities, existing_entities)\n return entity\n<|end_body_2|>\n\n<|body_start_3|>\n entity.children = list({**{e.entity_key: e for e in entity.children}, **{e.entity_key: e for e in children}}.values())\n for child in children:\n child.parent = entity\n if entity.id:\n child.parent_id = entity.id\n<|end_body_3|>\n\n<|body_start_4|>\n columns = [col.key for col in entity.get_columns()]\n for col in columns:\n if col == 'meta':\n existing_entity.meta = {**(existing_entity.meta or {}), **(entity.meta or {})}\n elif col not in ('id', 'created_at'):\n try:\n setattr(existing_entity, col, getattr(entity, col))\n except ObjectDeletedError as e:\n logger.warning('Could not set %s on entity <%s>: %s', col, existing_entity.entity_key, e)\n if include_children:\n existing_children = {e.entity_key: e for e in existing_entity.children}\n new_children = {e.entity_key: e for e in entity.children}\n updated_children = {}\n for key, child in new_children.items():\n existing_child = existing_children.get(key)\n updated_children[key] = cls._merge_columns(child, existing_child, include_children=True) if existing_child else child\n cls._append_children(existing_entity, *updated_children.values())\n return existing_entity\n<|end_body_4|>\n", "revision_id": "446bc2f67493d3554c5422242ff91d5b5c76d78a", "skeleton": "<|skeleton|>\nclass EntitiesMerger:\n \"\"\"A stateless functor in charge of detecting and merging entities that already exist on the database before flushing the session.\"\"\"\n\n def __call__(self, session: Session, entities: Iterable[Entity], existing_entities: Optional[EntityMapping]=None) -> List[Entity]:\n \"\"\"Merge a set of entities with their existing representations and update the parent/child relationships and return a list containing ``[*updated_entities, *new_entities]``.\"\"\"\n <|body_0|>\n\n def _merge(self, session: Session, entities: Iterable[Entity], new_entities: EntityMapping, existing_entities: EntityMapping) -> List[Entity]:\n \"\"\"(Recursive) inner implementation of the entity merge logic.\"\"\"\n <|body_1|>\n\n def _sync_parent(cls, session: Session, entity: Entity, new_entities: EntityMapping, existing_entities: EntityMapping) -> Entity:\n \"\"\"Recursively refresh the parent of an entity all the way up in the hierarchy, to make sure that all the parent/child relations are appropriately rewired and that all the relevant objects are added to this session.\"\"\"\n <|body_2|>\n\n def _append_children(entity: Entity, *children: Entity):\n \"\"\"Update the list of children of a given entity with the given list of entities. Note that, in case of ``entity_key`` conflict (the key of a new entity already exists in the entity's children), the most recent version will be used, so any column merge logic needs to happen before this method is called.\"\"\"\n <|body_3|>\n\n def _merge_columns(cls, entity: Entity, existing_entity: Entity, include_children: bool=False) -> Entity:\n \"\"\"Merge two versions of an entity column by column.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EntitiesMerger:\n \"\"\"A stateless functor in charge of detecting and merging entities that already exist on the database before flushing the session.\"\"\"\n\n def __call__(self, session: Session, entities: Iterable[Entity], existing_entities: Optional[EntityMapping]=None) -> List[Entity]:\n \"\"\"Merge a set of entities with their existing representations and update the parent/child relationships and return a list containing ``[*updated_entities, *new_entities]``.\"\"\"\n existing_entities = existing_entities or {}\n new_entities: EntityMapping = {}\n self._merge(session, entities, new_entities=new_entities, existing_entities=existing_entities)\n return list({**existing_entities, **new_entities}.values())\n\n def _merge(self, session: Session, entities: Iterable[Entity], new_entities: EntityMapping, existing_entities: EntityMapping) -> List[Entity]:\n \"\"\"(Recursive) inner implementation of the entity merge logic.\"\"\"\n processed_entities = []\n for entity in entities:\n key = entity.entity_key\n existing_entity = existing_entities.get(key, new_entities.get(key))\n entity = self._sync_parent(session, entity, new_entities, existing_entities)\n if existing_entity:\n existing_entity = self._merge_columns(entity, existing_entity)\n self._append_children(existing_entity, *self._merge(session, entity.children, new_entities, existing_entities))\n entity = existing_entity\n else:\n new_entities[key] = entity\n processed_entities.append(entity)\n return processed_entities\n\n def _sync_parent(cls, session: Session, entity: Entity, new_entities: EntityMapping, existing_entities: EntityMapping) -> Entity:\n \"\"\"Recursively refresh the parent of an entity all the way up in the hierarchy, to make sure that all the parent/child relations are appropriately rewired and that all the relevant objects are added to this session.\"\"\"\n parent = get_parent(session, entity)\n if not parent:\n return entity\n existing_parent = existing_entities.get(parent.entity_key, new_entities.get(parent.entity_key))\n if not existing_parent:\n return entity\n existing_entity = next(iter((child for child in existing_parent.children if child.entity_key == entity.entity_key)), None)\n if not existing_entity:\n entity.parent = None\n else:\n entity = cls._merge_columns(entity, existing_entity, include_children=True)\n cls._append_children(existing_parent, entity)\n cls._sync_parent(session, existing_parent, new_entities, existing_entities)\n return entity\n\n def _append_children(entity: Entity, *children: Entity):\n \"\"\"Update the list of children of a given entity with the given list of entities. Note that, in case of ``entity_key`` conflict (the key of a new entity already exists in the entity's children), the most recent version will be used, so any column merge logic needs to happen before this method is called.\"\"\"\n entity.children = list({**{e.entity_key: e for e in entity.children}, **{e.entity_key: e for e in children}}.values())\n for child in children:\n child.parent = entity\n if entity.id:\n child.parent_id = entity.id\n\n def _merge_columns(cls, entity: Entity, existing_entity: Entity, include_children: bool=False) -> Entity:\n \"\"\"Merge two versions of an entity column by column.\"\"\"\n columns = [col.key for col in entity.get_columns()]\n for col in columns:\n if col == 'meta':\n existing_entity.meta = {**(existing_entity.meta or {}), **(entity.meta or {})}\n elif col not in ('id', 'created_at'):\n try:\n setattr(existing_entity, col, getattr(entity, col))\n except ObjectDeletedError as e:\n logger.warning('Could not set %s on entity <%s>: %s', col, existing_entity.entity_key, e)\n if include_children:\n existing_children = {e.entity_key: e for e in existing_entity.children}\n new_children = {e.entity_key: e for e in entity.children}\n updated_children = {}\n for key, child in new_children.items():\n existing_child = existing_children.get(key)\n updated_children[key] = cls._merge_columns(child, existing_child, include_children=True) if existing_child else child\n cls._append_children(existing_entity, *updated_children.values())\n return existing_entity\n", "source": "the_stack_v2_python_sparse", "source_path": "platypush/entities/_engine/repo/merger.py", "source_repo": "BlackLight/platypush", "split": "test", "star_events_count": 265} {"blob_id": "9961c157ffdcc5c5612c26af9de2a8f63408f2bc", "bodies": ["result = pq.setup()\nvertices = result[0]\nnode_edges = result[1]\nself.assertEqual(vertices[-1], 200, 'The vertices list has not imported correctly.')\nexpected = [149, 155, 52, 87, 120, 39, 160, 137, 27, 79, 131, 100, 25, 55, 23, 126, 84, 166, 150, 62, 67, 1, 69, 35]\nself.assertEqual(node_edges[200], expected)", "vertices = [1, 2, 3, 4]\nnode_edges = defaultdict(list)\nnode_edges = {1: [2, 3], 2: [1, 3, 4], 3: [1, 2, 4], 4: [2, 3]}\nexpected = 2\ngot = pq.find_min_cuts(vertices, node_edges)\nself.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\nvertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nnode_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9]}\nexpected = 2\ngot = pq.find_min_cuts(vertices, node_edges)\nself.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\nvertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\nnode_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7, 14], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9], 11: [12, 13, 14, 15], 12: [11, 13, 14, 15], 13: [12, 11, 14, 15], 14: [12, 13, 11, 15, 4], 15: [12, 13, 14, 11]}\nexpected = 1\ngot = pq.find_min_cuts(vertices, node_edges)\nself.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))"], "bodies_text": "<|body_start_0|>\n result = pq.setup()\n vertices = result[0]\n node_edges = result[1]\n self.assertEqual(vertices[-1], 200, 'The vertices list has not imported correctly.')\n expected = [149, 155, 52, 87, 120, 39, 160, 137, 27, 79, 131, 100, 25, 55, 23, 126, 84, 166, 150, 62, 67, 1, 69, 35]\n self.assertEqual(node_edges[200], expected)\n<|end_body_0|>\n\n<|body_start_1|>\n vertices = [1, 2, 3, 4]\n node_edges = defaultdict(list)\n node_edges = {1: [2, 3], 2: [1, 3, 4], 3: [1, 2, 4], 4: [2, 3]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7, 14], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9], 11: [12, 13, 14, 15], 12: [11, 13, 14, 15], 13: [12, 11, 14, 15], 14: [12, 13, 11, 15, 4], 15: [12, 13, 14, 11]}\n expected = 1\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TestPQ3", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestPQ3:\n\n def test_setup(self):\n \"\"\"Test to ensure lists are set up correctly for the problem.\"\"\"\n <|body_0|>\n\n def test_find_min_cuts(self):\n \"\"\"Test to ensure min cut returns correct value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = pq.setup()\n vertices = result[0]\n node_edges = result[1]\n self.assertEqual(vertices[-1], 200, 'The vertices list has not imported correctly.')\n expected = [149, 155, 52, 87, 120, 39, 160, 137, 27, 79, 131, 100, 25, 55, 23, 126, 84, 166, 150, 62, 67, 1, 69, 35]\n self.assertEqual(node_edges[200], expected)\n<|end_body_0|>\n\n<|body_start_1|>\n vertices = [1, 2, 3, 4]\n node_edges = defaultdict(list)\n node_edges = {1: [2, 3], 2: [1, 3, 4], 3: [1, 2, 4], 4: [2, 3]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7, 14], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9], 11: [12, 13, 14, 15], 12: [11, 13, 14, 15], 13: [12, 11, 14, 15], 14: [12, 13, 11, 15, 4], 15: [12, 13, 14, 11]}\n expected = 1\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000342", "length_bytes": 2839, "license_type": "permissive", "methods": [{"docstring": "Test to ensure lists are set up correctly for the problem.", "name": "test_setup", "signature": "def test_setup(self)"}, {"docstring": "Test to ensure min cut returns correct value.", "name": "test_find_min_cuts", "signature": "def test_find_min_cuts(self)"}], "n_methods": 2, "prompt": "Implement the Python class `TestPQ3` described below.\n\nClass description:\nImplement the TestPQ3 class.\n\nMethod signatures and docstrings:\n- def test_setup(self): Test to ensure lists are set up correctly for the problem.\n- def test_find_min_cuts(self): Test to ensure min cut returns correct value.", "prompted_full_text": "Implement the Python class `TestPQ3` described below.\n\nClass description:\nImplement the TestPQ3 class.\n\nMethod signatures and docstrings:\n- def test_setup(self): Test to ensure lists are set up correctly for the problem.\n- def test_find_min_cuts(self): Test to ensure min cut returns correct value.\n\n<|skeleton|>\nclass TestPQ3:\n\n def test_setup(self):\n \"\"\"Test to ensure lists are set up correctly for the problem.\"\"\"\n <|body_0|>\n\n def test_find_min_cuts(self):\n \"\"\"Test to ensure min cut returns correct value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = pq.setup()\n vertices = result[0]\n node_edges = result[1]\n self.assertEqual(vertices[-1], 200, 'The vertices list has not imported correctly.')\n expected = [149, 155, 52, 87, 120, 39, 160, 137, 27, 79, 131, 100, 25, 55, 23, 126, 84, 166, 150, 62, 67, 1, 69, 35]\n self.assertEqual(node_edges[200], expected)\n<|end_body_0|>\n\n<|body_start_1|>\n vertices = [1, 2, 3, 4]\n node_edges = defaultdict(list)\n node_edges = {1: [2, 3], 2: [1, 3, 4], 3: [1, 2, 4], 4: [2, 3]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7, 14], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9], 11: [12, 13, 14, 15], 12: [11, 13, 14, 15], 13: [12, 11, 14, 15], 14: [12, 13, 11, 15, 4], 15: [12, 13, 14, 11]}\n expected = 1\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n<|end_body_1|>\n", "revision_id": "82605a1dea4e52480f006956645e812fe2cb02dc", "skeleton": "<|skeleton|>\nclass TestPQ3:\n\n def test_setup(self):\n \"\"\"Test to ensure lists are set up correctly for the problem.\"\"\"\n <|body_0|>\n\n def test_find_min_cuts(self):\n \"\"\"Test to ensure min cut returns correct value.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestPQ3:\n def test_setup(self):\n \"\"\"Test to ensure lists are set up correctly for the problem.\"\"\"\n result = pq.setup()\n vertices = result[0]\n node_edges = result[1]\n self.assertEqual(vertices[-1], 200, 'The vertices list has not imported correctly.')\n expected = [149, 155, 52, 87, 120, 39, 160, 137, 27, 79, 131, 100, 25, 55, 23, 126, 84, 166, 150, 62, 67, 1, 69, 35]\n self.assertEqual(node_edges[200], expected)\n\n def test_find_min_cuts(self):\n \"\"\"Test to ensure min cut returns correct value.\"\"\"\n vertices = [1, 2, 3, 4]\n node_edges = defaultdict(list)\n node_edges = {1: [2, 3], 2: [1, 3, 4], 3: [1, 2, 4], 4: [2, 3]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9]}\n expected = 2\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n vertices = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n node_edges = {1: [2, 3, 4, 5], 2: [1, 3, 4, 5], 3: [1, 2, 4, 5, 6], 4: [1, 2, 3, 5, 7, 14], 5: [1, 2, 3, 4], 6: [7, 8, 9, 10, 3], 7: [6, 8, 9, 10, 4], 8: [6, 7, 9, 10], 9: [6, 7, 8, 10], 10: [6, 7, 8, 9], 11: [12, 13, 14, 15], 12: [11, 13, 14, 15], 13: [12, 11, 14, 15], 14: [12, 13, 11, 15, 4], 15: [12, 13, 14, 11]}\n expected = 1\n got = pq.find_min_cuts(vertices, node_edges)\n self.assertEqual(expected, got, 'Min cut is not correct on length {} test'.format(len(vertices)))\n", "source": "the_stack_v2_python_sparse", "source_path": "Stanford/ProgrammingQuestion3_MinimumCut/test_pq3.py", "source_repo": "jeffvswanson/DataStructuresAndAlgorithms", "split": "test", "star_events_count": 4} {"blob_id": "f9ce0754a2404c1abc3164ccd222d8899b635456", "bodies": ["if config_path:\n cls.CONFIG_PATH = config_path\nif cls.CONFIG_PATH:\n try:\n if isfile(cls.CONFIG_PATH):\n with open(cls.CONFIG_PATH) as config_file:\n config = json.loads(config_file.read())\n for key, value in config.items():\n if value:\n setattr(cls, key.upper(), value)\n else:\n logger.warning('Config path {} is not a file.'.format(cls.CONFIG_PATH))\n except (IOError, OSError, ValueError):\n logger.warning('Could not read config file.', exc_info=True)\ncls.SQLALCHEMY_DATABASE_URI = format_db_path(cls.WALKOFF_DB_TYPE, cls.DB_PATH, 'WALKOFF_DB_USERNAME', 'WALKOFF_DB_PASSWORD')", "if keys is None:\n keys = [key for key in dir(cls) if not key.startswith('__')]\noutput = {}\nfor key in keys:\n if hasattr(cls, key.upper()):\n output[key.lower()] = getattr(cls, key.upper())\nwith open(cls.CONFIG_PATH, 'w') as config_file:\n config_file.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))"], "bodies_text": "<|body_start_0|>\n if config_path:\n cls.CONFIG_PATH = config_path\n if cls.CONFIG_PATH:\n try:\n if isfile(cls.CONFIG_PATH):\n with open(cls.CONFIG_PATH) as config_file:\n config = json.loads(config_file.read())\n for key, value in config.items():\n if value:\n setattr(cls, key.upper(), value)\n else:\n logger.warning('Config path {} is not a file.'.format(cls.CONFIG_PATH))\n except (IOError, OSError, ValueError):\n logger.warning('Could not read config file.', exc_info=True)\n cls.SQLALCHEMY_DATABASE_URI = format_db_path(cls.WALKOFF_DB_TYPE, cls.DB_PATH, 'WALKOFF_DB_USERNAME', 'WALKOFF_DB_PASSWORD')\n<|end_body_0|>\n\n<|body_start_1|>\n if keys is None:\n keys = [key for key in dir(cls) if not key.startswith('__')]\n output = {}\n for key in keys:\n if hasattr(cls, key.upper()):\n output[key.lower()] = getattr(cls, key.upper())\n with open(cls.CONFIG_PATH, 'w') as config_file:\n config_file.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Config", "detected_licenses": ["CC0-1.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Config:\n\n def load_config(cls, config_path=None):\n \"\"\"Loads Walkoff configuration from JSON file Args: config_path (str): Optional path to the config. Defaults to the CONFIG_PATH class variable.\"\"\"\n <|body_0|>\n\n def write_values_to_file(cls, keys=None):\n \"\"\"Writes the current walkoff configuration to a file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config_path:\n cls.CONFIG_PATH = config_path\n if cls.CONFIG_PATH:\n try:\n if isfile(cls.CONFIG_PATH):\n with open(cls.CONFIG_PATH) as config_file:\n config = json.loads(config_file.read())\n for key, value in config.items():\n if value:\n setattr(cls, key.upper(), value)\n else:\n logger.warning('Config path {} is not a file.'.format(cls.CONFIG_PATH))\n except (IOError, OSError, ValueError):\n logger.warning('Could not read config file.', exc_info=True)\n cls.SQLALCHEMY_DATABASE_URI = format_db_path(cls.WALKOFF_DB_TYPE, cls.DB_PATH, 'WALKOFF_DB_USERNAME', 'WALKOFF_DB_PASSWORD')\n<|end_body_0|>\n\n<|body_start_1|>\n if keys is None:\n keys = [key for key in dir(cls) if not key.startswith('__')]\n output = {}\n for key in keys:\n if hasattr(cls, key.upper()):\n output[key.lower()] = getattr(cls, key.upper())\n with open(cls.CONFIG_PATH, 'w') as config_file:\n config_file.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000343", "length_bytes": 6761, "license_type": "permissive", "methods": [{"docstring": "Loads Walkoff configuration from JSON file Args: config_path (str): Optional path to the config. Defaults to the CONFIG_PATH class variable.", "name": "load_config", "signature": "def load_config(cls, config_path=None)"}, {"docstring": "Writes the current walkoff configuration to a file", "name": "write_values_to_file", "signature": "def write_values_to_file(cls, keys=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_009071", "prompt": "Implement the Python class `Config` described below.\n\nClass description:\nImplement the Config class.\n\nMethod signatures and docstrings:\n- def load_config(cls, config_path=None): Loads Walkoff configuration from JSON file Args: config_path (str): Optional path to the config. Defaults to the CONFIG_PATH class variable.\n- def write_values_to_file(cls, keys=None): Writes the current walkoff configuration to a file", "prompted_full_text": "Implement the Python class `Config` described below.\n\nClass description:\nImplement the Config class.\n\nMethod signatures and docstrings:\n- def load_config(cls, config_path=None): Loads Walkoff configuration from JSON file Args: config_path (str): Optional path to the config. Defaults to the CONFIG_PATH class variable.\n- def write_values_to_file(cls, keys=None): Writes the current walkoff configuration to a file\n\n<|skeleton|>\nclass Config:\n\n def load_config(cls, config_path=None):\n \"\"\"Loads Walkoff configuration from JSON file Args: config_path (str): Optional path to the config. Defaults to the CONFIG_PATH class variable.\"\"\"\n <|body_0|>\n\n def write_values_to_file(cls, keys=None):\n \"\"\"Writes the current walkoff configuration to a file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if config_path:\n cls.CONFIG_PATH = config_path\n if cls.CONFIG_PATH:\n try:\n if isfile(cls.CONFIG_PATH):\n with open(cls.CONFIG_PATH) as config_file:\n config = json.loads(config_file.read())\n for key, value in config.items():\n if value:\n setattr(cls, key.upper(), value)\n else:\n logger.warning('Config path {} is not a file.'.format(cls.CONFIG_PATH))\n except (IOError, OSError, ValueError):\n logger.warning('Could not read config file.', exc_info=True)\n cls.SQLALCHEMY_DATABASE_URI = format_db_path(cls.WALKOFF_DB_TYPE, cls.DB_PATH, 'WALKOFF_DB_USERNAME', 'WALKOFF_DB_PASSWORD')\n<|end_body_0|>\n\n<|body_start_1|>\n if keys is None:\n keys = [key for key in dir(cls) if not key.startswith('__')]\n output = {}\n for key in keys:\n if hasattr(cls, key.upper()):\n output[key.lower()] = getattr(cls, key.upper())\n with open(cls.CONFIG_PATH, 'w') as config_file:\n config_file.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))\n<|end_body_1|>\n", "revision_id": "f549633f831d34b702dfe2e77d678216fd6d7931", "skeleton": "<|skeleton|>\nclass Config:\n\n def load_config(cls, config_path=None):\n \"\"\"Loads Walkoff configuration from JSON file Args: config_path (str): Optional path to the config. Defaults to the CONFIG_PATH class variable.\"\"\"\n <|body_0|>\n\n def write_values_to_file(cls, keys=None):\n \"\"\"Writes the current walkoff configuration to a file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Config:\n def load_config(cls, config_path=None):\n \"\"\"Loads Walkoff configuration from JSON file Args: config_path (str): Optional path to the config. Defaults to the CONFIG_PATH class variable.\"\"\"\n if config_path:\n cls.CONFIG_PATH = config_path\n if cls.CONFIG_PATH:\n try:\n if isfile(cls.CONFIG_PATH):\n with open(cls.CONFIG_PATH) as config_file:\n config = json.loads(config_file.read())\n for key, value in config.items():\n if value:\n setattr(cls, key.upper(), value)\n else:\n logger.warning('Config path {} is not a file.'.format(cls.CONFIG_PATH))\n except (IOError, OSError, ValueError):\n logger.warning('Could not read config file.', exc_info=True)\n cls.SQLALCHEMY_DATABASE_URI = format_db_path(cls.WALKOFF_DB_TYPE, cls.DB_PATH, 'WALKOFF_DB_USERNAME', 'WALKOFF_DB_PASSWORD')\n\n def write_values_to_file(cls, keys=None):\n \"\"\"Writes the current walkoff configuration to a file\"\"\"\n if keys is None:\n keys = [key for key in dir(cls) if not key.startswith('__')]\n output = {}\n for key in keys:\n if hasattr(cls, key.upper()):\n output[key.lower()] = getattr(cls, key.upper())\n with open(cls.CONFIG_PATH, 'w') as config_file:\n config_file.write(json.dumps(output, sort_keys=True, indent=4, separators=(',', ': ')))\n", "source": "the_stack_v2_python_sparse", "source_path": "walkoff/config.py", "source_repo": "MikeDeane/WALKOFF", "split": "test", "star_events_count": 0} {"blob_id": "22bd9a3c29a00c3defc7e50771d3cbd01c93cce1", "bodies": ["dic = {}\nm, n = (head, head)\nwhile m:\n dic[m] = RandomListNode(m.label)\n m = m.next\nwhile n:\n dic[n].next = dic.get(n.next, None)\n dic[n].random = dic.get(n.random, None)\n n = n.next\nreturn dic.get(head, None)", "if not head:\n return None\np = head\nwhile p:\n node = RandomListNode(p.label)\n node.next = p.next\n p.next = node\n p = p.next.next\np = head\nwhile p:\n if p.random:\n p.next.random = p.random.next\n p = p.next.next\nnewhead = head.next\npold = head\npnew = newhead\nwhile pnew.next:\n pold.next = pnew.next\n pold = pold.next\n pnew.next = pold.next\n pnew = pnew.next\npold.next = None\npnew.next = None\nreturn newhead"], "bodies_text": "<|body_start_0|>\n dic = {}\n m, n = (head, head)\n while m:\n dic[m] = RandomListNode(m.label)\n m = m.next\n while n:\n dic[n].next = dic.get(n.next, None)\n dic[n].random = dic.get(n.random, None)\n n = n.next\n return dic.get(head, None)\n<|end_body_0|>\n\n<|body_start_1|>\n if not head:\n return None\n p = head\n while p:\n node = RandomListNode(p.label)\n node.next = p.next\n p.next = node\n p = p.next.next\n p = head\n while p:\n if p.random:\n p.next.random = p.random.next\n p = p.next.next\n newhead = head.next\n pold = head\n pnew = newhead\n while pnew.next:\n pold.next = pnew.next\n pold = pold.next\n pnew.next = pold.next\n pnew = pnew.next\n pold.next = None\n pnew.next = None\n return newhead\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def copyRandomList_two_pass(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n <|body_0|>\n\n def copyRandomList_on_dict(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dic = {}\n m, n = (head, head)\n while m:\n dic[m] = RandomListNode(m.label)\n m = m.next\n while n:\n dic[n].next = dic.get(n.next, None)\n dic[n].random = dic.get(n.random, None)\n n = n.next\n return dic.get(head, None)\n<|end_body_0|>\n\n<|body_start_1|>\n if not head:\n return None\n p = head\n while p:\n node = RandomListNode(p.label)\n node.next = p.next\n p.next = node\n p = p.next.next\n p = head\n while p:\n if p.random:\n p.next.random = p.random.next\n p = p.next.next\n newhead = head.next\n pold = head\n pnew = newhead\n while pnew.next:\n pold.next = pnew.next\n pold = pold.next\n pnew.next = pold.next\n pnew = pnew.next\n pold.next = None\n pnew.next = None\n return newhead\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000344", "length_bytes": 1639, "license_type": "no_license", "methods": [{"docstring": ":type head: RandomListNode :rtype: RandomListNode", "name": "copyRandomList_two_pass", "signature": "def copyRandomList_two_pass(self, head)"}, {"docstring": ":type head: RandomListNode :rtype: RandomListNode", "name": "copyRandomList_on_dict", "signature": "def copyRandomList_on_dict(self, head)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def copyRandomList_two_pass(self, head): :type head: RandomListNode :rtype: RandomListNode\n- def copyRandomList_on_dict(self, head): :type head: RandomListNode :rtype: RandomListNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def copyRandomList_two_pass(self, head): :type head: RandomListNode :rtype: RandomListNode\n- def copyRandomList_on_dict(self, head): :type head: RandomListNode :rtype: RandomListNode\n\n<|skeleton|>\nclass Solution:\n\n def copyRandomList_two_pass(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n <|body_0|>\n\n def copyRandomList_on_dict(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dic = {}\n m, n = (head, head)\n while m:\n dic[m] = RandomListNode(m.label)\n m = m.next\n while n:\n dic[n].next = dic.get(n.next, None)\n dic[n].random = dic.get(n.random, None)\n n = n.next\n return dic.get(head, None)\n<|end_body_0|>\n\n<|body_start_1|>\n if not head:\n return None\n p = head\n while p:\n node = RandomListNode(p.label)\n node.next = p.next\n p.next = node\n p = p.next.next\n p = head\n while p:\n if p.random:\n p.next.random = p.random.next\n p = p.next.next\n newhead = head.next\n pold = head\n pnew = newhead\n while pnew.next:\n pold.next = pnew.next\n pold = pold.next\n pnew.next = pold.next\n pnew = pnew.next\n pold.next = None\n pnew.next = None\n return newhead\n<|end_body_1|>\n", "revision_id": "0e99f9a5226507706b3ee66fd04bae813755ef40", "skeleton": "<|skeleton|>\nclass Solution:\n\n def copyRandomList_two_pass(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n <|body_0|>\n\n def copyRandomList_on_dict(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def copyRandomList_two_pass(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n dic = {}\n m, n = (head, head)\n while m:\n dic[m] = RandomListNode(m.label)\n m = m.next\n while n:\n dic[n].next = dic.get(n.next, None)\n dic[n].random = dic.get(n.random, None)\n n = n.next\n return dic.get(head, None)\n\n def copyRandomList_on_dict(self, head):\n \"\"\":type head: RandomListNode :rtype: RandomListNode\"\"\"\n if not head:\n return None\n p = head\n while p:\n node = RandomListNode(p.label)\n node.next = p.next\n p.next = node\n p = p.next.next\n p = head\n while p:\n if p.random:\n p.next.random = p.random.next\n p = p.next.next\n newhead = head.next\n pold = head\n pnew = newhead\n while pnew.next:\n pold.next = pnew.next\n pold = pold.next\n pnew.next = pold.next\n pnew = pnew.next\n pold.next = None\n pnew.next = None\n return newhead\n", "source": "the_stack_v2_python_sparse", "source_path": "medium/linklist/test_138_Copy_List_with_Random_Pointer.py", "source_repo": "wuxu1019/leetcode_sophia", "split": "test", "star_events_count": 1} {"blob_id": "1a4a7e9e7ceab433b01cae7c2155beaa9aaf4ae7", "bodies": ["if obj.objectName() == 'MainWindowWindow':\n return returnValue\nif obj == self.last:\n return returnValue\nelse:\n self.last = obj\nif isinstance(obj, PyQt5.QtWidgets.QTabBar):\n self.log.warning(f'Click Tab : [{obj.tabText(obj.currentIndex())}]')\nelif isinstance(obj, PyQt5.QtWidgets.QComboBox):\n self.log.warning(f'Click DropDown: [{obj.objectName()}]')\nelif isinstance(obj, PyQt5.QtWidgets.QPushButton):\n self.log.warning(f'Click Button : [{obj.objectName()}]')\nelif isinstance(obj, PyQt5.QtWidgets.QRadioButton):\n self.log.warning(f'Click Radio : [{obj.objectName()}]:{obj.isChecked()}')\nelif isinstance(obj, PyQt5.QtWidgets.QGroupBox):\n self.log.warning(f'Click Group : [{obj.objectName()}]:{obj.isChecked()}')\nelif isinstance(obj, PyQt5.QtWidgets.QCheckBox):\n self.log.warning(f'Click Checkbox: [{obj.objectName()}]:{obj.isChecked()}')\nelif isinstance(obj, PyQt5.QtWidgets.QLineEdit):\n self.log.warning(f'Click EditLine: [{obj.objectName()}]:{obj.text()}')\nelse:\n self.log.warning(f'Click Object : [{obj.objectName()}]')\nreturn returnValue", "try:\n returnValue = PyQt5.QtWidgets.QApplication.notify(self, obj, event)\nexcept Exception as e:\n self.log.critical('----------------------------------------------------')\n self.log.critical('Event: {0}'.format(event))\n self.log.critical('EventType: {0}'.format(event.type()))\n self.log.critical('Exception error in event loop: {0}'.format(e))\n self.log.critical('----------------------------------------------------')\n returnValue = False\nif not isinstance(event, PyQt5.QtGui.QMouseEvent):\n return returnValue\nif not event.button():\n return returnValue\nreturnValue = self.handleButtons(obj, returnValue)\nreturn returnValue"], "bodies_text": "<|body_start_0|>\n if obj.objectName() == 'MainWindowWindow':\n return returnValue\n if obj == self.last:\n return returnValue\n else:\n self.last = obj\n if isinstance(obj, PyQt5.QtWidgets.QTabBar):\n self.log.warning(f'Click Tab : [{obj.tabText(obj.currentIndex())}]')\n elif isinstance(obj, PyQt5.QtWidgets.QComboBox):\n self.log.warning(f'Click DropDown: [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QPushButton):\n self.log.warning(f'Click Button : [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QRadioButton):\n self.log.warning(f'Click Radio : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QGroupBox):\n self.log.warning(f'Click Group : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QCheckBox):\n self.log.warning(f'Click Checkbox: [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QLineEdit):\n self.log.warning(f'Click EditLine: [{obj.objectName()}]:{obj.text()}')\n else:\n self.log.warning(f'Click Object : [{obj.objectName()}]')\n return returnValue\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n returnValue = PyQt5.QtWidgets.QApplication.notify(self, obj, event)\n except Exception as e:\n self.log.critical('----------------------------------------------------')\n self.log.critical('Event: {0}'.format(event))\n self.log.critical('EventType: {0}'.format(event.type()))\n self.log.critical('Exception error in event loop: {0}'.format(e))\n self.log.critical('----------------------------------------------------')\n returnValue = False\n if not isinstance(event, PyQt5.QtGui.QMouseEvent):\n return returnValue\n if not event.button():\n return returnValue\n returnValue = self.handleButtons(obj, returnValue)\n return returnValue\n<|end_body_1|>\n", "class_docstring": "MyApp implements a custom notify handler to log errors, when C++ classes and python wrapper in PyQt5 environment mismatch. mostly this relates to the situation when a C++ object is already deleted, but the python wrapper still exists. so far I know that's the only chance to log this issues. in addition it writes mouse pressed and key pressed events in debug level to log including event and object name to be analyse the input methods.", "class_name": "MyApp", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MyApp:\n \"\"\"MyApp implements a custom notify handler to log errors, when C++ classes and python wrapper in PyQt5 environment mismatch. mostly this relates to the situation when a C++ object is already deleted, but the python wrapper still exists. so far I know that's the only chance to log this issues. in addition it writes mouse pressed and key pressed events in debug level to log including event and object name to be analyse the input methods.\"\"\"\n\n def handleButtons(self, obj, returnValue):\n \"\"\":param obj: :param returnValue: :return:\"\"\"\n <|body_0|>\n\n def notify(self, obj, event):\n \"\"\":param obj: :param event: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if obj.objectName() == 'MainWindowWindow':\n return returnValue\n if obj == self.last:\n return returnValue\n else:\n self.last = obj\n if isinstance(obj, PyQt5.QtWidgets.QTabBar):\n self.log.warning(f'Click Tab : [{obj.tabText(obj.currentIndex())}]')\n elif isinstance(obj, PyQt5.QtWidgets.QComboBox):\n self.log.warning(f'Click DropDown: [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QPushButton):\n self.log.warning(f'Click Button : [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QRadioButton):\n self.log.warning(f'Click Radio : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QGroupBox):\n self.log.warning(f'Click Group : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QCheckBox):\n self.log.warning(f'Click Checkbox: [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QLineEdit):\n self.log.warning(f'Click EditLine: [{obj.objectName()}]:{obj.text()}')\n else:\n self.log.warning(f'Click Object : [{obj.objectName()}]')\n return returnValue\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n returnValue = PyQt5.QtWidgets.QApplication.notify(self, obj, event)\n except Exception as e:\n self.log.critical('----------------------------------------------------')\n self.log.critical('Event: {0}'.format(event))\n self.log.critical('EventType: {0}'.format(event.type()))\n self.log.critical('Exception error in event loop: {0}'.format(e))\n self.log.critical('----------------------------------------------------')\n returnValue = False\n if not isinstance(event, PyQt5.QtGui.QMouseEvent):\n return returnValue\n if not event.button():\n return returnValue\n returnValue = self.handleButtons(obj, returnValue)\n return returnValue\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000345", "length_bytes": 16165, "license_type": "permissive", "methods": [{"docstring": ":param obj: :param returnValue: :return:", "name": "handleButtons", "signature": "def handleButtons(self, obj, returnValue)"}, {"docstring": ":param obj: :param event: :return:", "name": "notify", "signature": "def notify(self, obj, event)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_011226", "prompt": "Implement the Python class `MyApp` described below.\n\nClass description:\nMyApp implements a custom notify handler to log errors, when C++ classes and python wrapper in PyQt5 environment mismatch. mostly this relates to the situation when a C++ object is already deleted, but the python wrapper still exists. so far I know that's the only chance to log this issues. in addition it writes mouse pressed and key pressed events in debug level to log including event and object name to be analyse the input methods.\n\nMethod signatures and docstrings:\n- def handleButtons(self, obj, returnValue): :param obj: :param returnValue: :return:\n- def notify(self, obj, event): :param obj: :param event: :return:", "prompted_full_text": "Implement the Python class `MyApp` described below.\n\nClass description:\nMyApp implements a custom notify handler to log errors, when C++ classes and python wrapper in PyQt5 environment mismatch. mostly this relates to the situation when a C++ object is already deleted, but the python wrapper still exists. so far I know that's the only chance to log this issues. in addition it writes mouse pressed and key pressed events in debug level to log including event and object name to be analyse the input methods.\n\nMethod signatures and docstrings:\n- def handleButtons(self, obj, returnValue): :param obj: :param returnValue: :return:\n- def notify(self, obj, event): :param obj: :param event: :return:\n\n<|skeleton|>\nclass MyApp:\n \"\"\"MyApp implements a custom notify handler to log errors, when C++ classes and python wrapper in PyQt5 environment mismatch. mostly this relates to the situation when a C++ object is already deleted, but the python wrapper still exists. so far I know that's the only chance to log this issues. in addition it writes mouse pressed and key pressed events in debug level to log including event and object name to be analyse the input methods.\"\"\"\n\n def handleButtons(self, obj, returnValue):\n \"\"\":param obj: :param returnValue: :return:\"\"\"\n <|body_0|>\n\n def notify(self, obj, event):\n \"\"\":param obj: :param event: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if obj.objectName() == 'MainWindowWindow':\n return returnValue\n if obj == self.last:\n return returnValue\n else:\n self.last = obj\n if isinstance(obj, PyQt5.QtWidgets.QTabBar):\n self.log.warning(f'Click Tab : [{obj.tabText(obj.currentIndex())}]')\n elif isinstance(obj, PyQt5.QtWidgets.QComboBox):\n self.log.warning(f'Click DropDown: [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QPushButton):\n self.log.warning(f'Click Button : [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QRadioButton):\n self.log.warning(f'Click Radio : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QGroupBox):\n self.log.warning(f'Click Group : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QCheckBox):\n self.log.warning(f'Click Checkbox: [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QLineEdit):\n self.log.warning(f'Click EditLine: [{obj.objectName()}]:{obj.text()}')\n else:\n self.log.warning(f'Click Object : [{obj.objectName()}]')\n return returnValue\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n returnValue = PyQt5.QtWidgets.QApplication.notify(self, obj, event)\n except Exception as e:\n self.log.critical('----------------------------------------------------')\n self.log.critical('Event: {0}'.format(event))\n self.log.critical('EventType: {0}'.format(event.type()))\n self.log.critical('Exception error in event loop: {0}'.format(e))\n self.log.critical('----------------------------------------------------')\n returnValue = False\n if not isinstance(event, PyQt5.QtGui.QMouseEvent):\n return returnValue\n if not event.button():\n return returnValue\n returnValue = self.handleButtons(obj, returnValue)\n return returnValue\n<|end_body_1|>\n", "revision_id": "c38c46050989a463f8e65b532b55c793f1ed9d15", "skeleton": "<|skeleton|>\nclass MyApp:\n \"\"\"MyApp implements a custom notify handler to log errors, when C++ classes and python wrapper in PyQt5 environment mismatch. mostly this relates to the situation when a C++ object is already deleted, but the python wrapper still exists. so far I know that's the only chance to log this issues. in addition it writes mouse pressed and key pressed events in debug level to log including event and object name to be analyse the input methods.\"\"\"\n\n def handleButtons(self, obj, returnValue):\n \"\"\":param obj: :param returnValue: :return:\"\"\"\n <|body_0|>\n\n def notify(self, obj, event):\n \"\"\":param obj: :param event: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MyApp:\n \"\"\"MyApp implements a custom notify handler to log errors, when C++ classes and python wrapper in PyQt5 environment mismatch. mostly this relates to the situation when a C++ object is already deleted, but the python wrapper still exists. so far I know that's the only chance to log this issues. in addition it writes mouse pressed and key pressed events in debug level to log including event and object name to be analyse the input methods.\"\"\"\n\n def handleButtons(self, obj, returnValue):\n \"\"\":param obj: :param returnValue: :return:\"\"\"\n if obj.objectName() == 'MainWindowWindow':\n return returnValue\n if obj == self.last:\n return returnValue\n else:\n self.last = obj\n if isinstance(obj, PyQt5.QtWidgets.QTabBar):\n self.log.warning(f'Click Tab : [{obj.tabText(obj.currentIndex())}]')\n elif isinstance(obj, PyQt5.QtWidgets.QComboBox):\n self.log.warning(f'Click DropDown: [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QPushButton):\n self.log.warning(f'Click Button : [{obj.objectName()}]')\n elif isinstance(obj, PyQt5.QtWidgets.QRadioButton):\n self.log.warning(f'Click Radio : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QGroupBox):\n self.log.warning(f'Click Group : [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QCheckBox):\n self.log.warning(f'Click Checkbox: [{obj.objectName()}]:{obj.isChecked()}')\n elif isinstance(obj, PyQt5.QtWidgets.QLineEdit):\n self.log.warning(f'Click EditLine: [{obj.objectName()}]:{obj.text()}')\n else:\n self.log.warning(f'Click Object : [{obj.objectName()}]')\n return returnValue\n\n def notify(self, obj, event):\n \"\"\":param obj: :param event: :return:\"\"\"\n try:\n returnValue = PyQt5.QtWidgets.QApplication.notify(self, obj, event)\n except Exception as e:\n self.log.critical('----------------------------------------------------')\n self.log.critical('Event: {0}'.format(event))\n self.log.critical('EventType: {0}'.format(event.type()))\n self.log.critical('Exception error in event loop: {0}'.format(e))\n self.log.critical('----------------------------------------------------')\n returnValue = False\n if not isinstance(event, PyQt5.QtGui.QMouseEvent):\n return returnValue\n if not event.button():\n return returnValue\n returnValue = self.handleButtons(obj, returnValue)\n return returnValue\n", "source": "the_stack_v2_python_sparse", "source_path": "mw4/loader.py", "source_repo": "nickym998/MountWizzard4", "split": "test", "star_events_count": 0} {"blob_id": "f780ed75813fabd82fc498e502fefde79f5ff479", "bodies": ["errors = {}\nif not name:\n errors['name'] = _('Name must be set')\nif not date_of_birth:\n errors['date_of_birth'] = _('Date of Birth must be set')\nif not contact_number:\n errors['contact_number'] = _('Contact number must be set')\nif not password:\n errors['password'] = _('Password must be set')\nif errors:\n raise ValidationError(errors)\nvalidate_date_of_birth(date_of_birth)\nvalidate_contact_number(contact_number)\nuser = self.model(name=name, date_of_birth=date_of_birth, contact_number=contact_number, **extra_fields)\nuser.set_password(password)\nuser.save()\nreturn user", "extra_fields.setdefault('is_staff', True)\nextra_fields.setdefault('is_superuser', True)\nreturn self.create_user(name, date_of_birth, contact_number, password, **extra_fields)"], "bodies_text": "<|body_start_0|>\n errors = {}\n if not name:\n errors['name'] = _('Name must be set')\n if not date_of_birth:\n errors['date_of_birth'] = _('Date of Birth must be set')\n if not contact_number:\n errors['contact_number'] = _('Contact number must be set')\n if not password:\n errors['password'] = _('Password must be set')\n if errors:\n raise ValidationError(errors)\n validate_date_of_birth(date_of_birth)\n validate_contact_number(contact_number)\n user = self.model(name=name, date_of_birth=date_of_birth, contact_number=contact_number, **extra_fields)\n user.set_password(password)\n user.save()\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n return self.create_user(name, date_of_birth, contact_number, password, **extra_fields)\n<|end_body_1|>\n", "class_docstring": "Customer model manager where contact_number is the unique identifier for authentication instead of usernames.", "class_name": "CustomerManager", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomerManager:\n \"\"\"Customer model manager where contact_number is the unique identifier for authentication instead of usernames.\"\"\"\n\n def create_user(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save an user with the given contact number and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save a superuser with the given email and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n errors = {}\n if not name:\n errors['name'] = _('Name must be set')\n if not date_of_birth:\n errors['date_of_birth'] = _('Date of Birth must be set')\n if not contact_number:\n errors['contact_number'] = _('Contact number must be set')\n if not password:\n errors['password'] = _('Password must be set')\n if errors:\n raise ValidationError(errors)\n validate_date_of_birth(date_of_birth)\n validate_contact_number(contact_number)\n user = self.model(name=name, date_of_birth=date_of_birth, contact_number=contact_number, **extra_fields)\n user.set_password(password)\n user.save()\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n return self.create_user(name, date_of_birth, contact_number, password, **extra_fields)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000346", "length_bytes": 1797, "license_type": "no_license", "methods": [{"docstring": "Create and save an user with the given contact number and password.", "name": "create_user", "signature": "def create_user(self, name, date_of_birth, contact_number, password, **extra_fields)"}, {"docstring": "Create and save a superuser with the given email and password.", "name": "create_superuser", "signature": "def create_superuser(self, name, date_of_birth, contact_number, password, **extra_fields)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_010322", "prompt": "Implement the Python class `CustomerManager` described below.\n\nClass description:\nCustomer model manager where contact_number is the unique identifier for authentication instead of usernames.\n\nMethod signatures and docstrings:\n- def create_user(self, name, date_of_birth, contact_number, password, **extra_fields): Create and save an user with the given contact number and password.\n- def create_superuser(self, name, date_of_birth, contact_number, password, **extra_fields): Create and save a superuser with the given email and password.", "prompted_full_text": "Implement the Python class `CustomerManager` described below.\n\nClass description:\nCustomer model manager where contact_number is the unique identifier for authentication instead of usernames.\n\nMethod signatures and docstrings:\n- def create_user(self, name, date_of_birth, contact_number, password, **extra_fields): Create and save an user with the given contact number and password.\n- def create_superuser(self, name, date_of_birth, contact_number, password, **extra_fields): Create and save a superuser with the given email and password.\n\n<|skeleton|>\nclass CustomerManager:\n \"\"\"Customer model manager where contact_number is the unique identifier for authentication instead of usernames.\"\"\"\n\n def create_user(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save an user with the given contact number and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save a superuser with the given email and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n errors = {}\n if not name:\n errors['name'] = _('Name must be set')\n if not date_of_birth:\n errors['date_of_birth'] = _('Date of Birth must be set')\n if not contact_number:\n errors['contact_number'] = _('Contact number must be set')\n if not password:\n errors['password'] = _('Password must be set')\n if errors:\n raise ValidationError(errors)\n validate_date_of_birth(date_of_birth)\n validate_contact_number(contact_number)\n user = self.model(name=name, date_of_birth=date_of_birth, contact_number=contact_number, **extra_fields)\n user.set_password(password)\n user.save()\n return user\n<|end_body_0|>\n\n<|body_start_1|>\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n return self.create_user(name, date_of_birth, contact_number, password, **extra_fields)\n<|end_body_1|>\n", "revision_id": "3013c44694305ee20571b62f4a9d8229d4f25a06", "skeleton": "<|skeleton|>\nclass CustomerManager:\n \"\"\"Customer model manager where contact_number is the unique identifier for authentication instead of usernames.\"\"\"\n\n def create_user(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save an user with the given contact number and password.\"\"\"\n <|body_0|>\n\n def create_superuser(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save a superuser with the given email and password.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CustomerManager:\n \"\"\"Customer model manager where contact_number is the unique identifier for authentication instead of usernames.\"\"\"\n\n def create_user(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save an user with the given contact number and password.\"\"\"\n errors = {}\n if not name:\n errors['name'] = _('Name must be set')\n if not date_of_birth:\n errors['date_of_birth'] = _('Date of Birth must be set')\n if not contact_number:\n errors['contact_number'] = _('Contact number must be set')\n if not password:\n errors['password'] = _('Password must be set')\n if errors:\n raise ValidationError(errors)\n validate_date_of_birth(date_of_birth)\n validate_contact_number(contact_number)\n user = self.model(name=name, date_of_birth=date_of_birth, contact_number=contact_number, **extra_fields)\n user.set_password(password)\n user.save()\n return user\n\n def create_superuser(self, name, date_of_birth, contact_number, password, **extra_fields):\n \"\"\"Create and save a superuser with the given email and password.\"\"\"\n extra_fields.setdefault('is_staff', True)\n extra_fields.setdefault('is_superuser', True)\n return self.create_user(name, date_of_birth, contact_number, password, **extra_fields)\n", "source": "the_stack_v2_python_sparse", "source_path": "car_rental_service/apps/customers/managers.py", "source_repo": "vivek-at-work/rent-a-car", "split": "test", "star_events_count": 0} {"blob_id": "e89dd867a80ad63191328f5bc337877fef4b67cf", "bodies": ["changes = []\nrefname = payload['ref']\nmatch = re.match('^refs/(heads|tags)/(.+)$', refname)\nif not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\nbranch = match.group(2)\nif payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\nfor commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' % (commit['id'],))\n continue\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n when_timestamp = dateparse(commit['timestamp'])\n log.msg('New revision: %s' % commit['id'][:8])\n change = {'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': files, 'comments': commit['message'], 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': branch, 'revlink': commit['url'], 'repository': repo_url, 'project': project, 'category': event, 'properties': {'event': event}}\n if codebase is not None:\n change['codebase'] = codebase\n changes.append(change)\nreturn changes", "attrs = payload['object_attributes']\ncommit = attrs['last_commit']\nwhen_timestamp = dateparse(commit['timestamp'])\nrepo_url = attrs['source']['git_http_url']\nchanges = [{'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': [], 'comments': 'MR#{}: {}\\n\\n{}'.format(attrs['iid'], attrs['title'], attrs['description']), 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': attrs['source_branch'], 'repository': repo_url, 'project': project, 'category': event, 'revlink': attrs['url'], 'properties': {'target_branch': attrs['target_branch'], 'target_repository': attrs['target']['git_http_url'], 'event': event}}]\nif codebase is not None:\n changes[0]['codebase'] = codebase\nreturn changes", "expected_secret = isinstance(self.options, dict) and self.options.get('secret')\nif expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n if received_secret != expected_secret:\n raise ValueError('Invalid secret')\ntry:\n payload = json.load(request.content)\nexcept Exception as e:\n raise ValueError('Error loading JSON: ' + str(e))\nevent_type = request.getHeader(_HEADER_EVENT)\nevent_type = bytes2NativeString(event_type)\nevent_type = payload.get('object_kind', event_type)\nproject = request.args.get('project', [''])[0]\ncodebase = request.args.get('codebase', [None])[0]\nif event_type in ('push', 'tag_push', 'Push Hook'):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(payload, user, repo, repo_url, project, event_type, codebase=codebase)\nelif event_type == 'merge_request':\n changes = self._process_merge_request_change(payload, project, event_type, codebase=codebase)\nelse:\n changes = []\nif changes:\n log.msg('Received {} changes from {} gitlab event'.format(len(changes), event_type))\nreturn (changes, 'git')"], "bodies_text": "<|body_start_0|>\n changes = []\n refname = payload['ref']\n match = re.match('^refs/(heads|tags)/(.+)$', refname)\n if not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\n branch = match.group(2)\n if payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\n for commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' % (commit['id'],))\n continue\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n when_timestamp = dateparse(commit['timestamp'])\n log.msg('New revision: %s' % commit['id'][:8])\n change = {'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': files, 'comments': commit['message'], 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': branch, 'revlink': commit['url'], 'repository': repo_url, 'project': project, 'category': event, 'properties': {'event': event}}\n if codebase is not None:\n change['codebase'] = codebase\n changes.append(change)\n return changes\n<|end_body_0|>\n\n<|body_start_1|>\n attrs = payload['object_attributes']\n commit = attrs['last_commit']\n when_timestamp = dateparse(commit['timestamp'])\n repo_url = attrs['source']['git_http_url']\n changes = [{'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': [], 'comments': 'MR#{}: {}\\n\\n{}'.format(attrs['iid'], attrs['title'], attrs['description']), 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': attrs['source_branch'], 'repository': repo_url, 'project': project, 'category': event, 'revlink': attrs['url'], 'properties': {'target_branch': attrs['target_branch'], 'target_repository': attrs['target']['git_http_url'], 'event': event}}]\n if codebase is not None:\n changes[0]['codebase'] = codebase\n return changes\n<|end_body_1|>\n\n<|body_start_2|>\n expected_secret = isinstance(self.options, dict) and self.options.get('secret')\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n if received_secret != expected_secret:\n raise ValueError('Invalid secret')\n try:\n payload = json.load(request.content)\n except Exception as e:\n raise ValueError('Error loading JSON: ' + str(e))\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2NativeString(event_type)\n event_type = payload.get('object_kind', event_type)\n project = request.args.get('project', [''])[0]\n codebase = request.args.get('codebase', [None])[0]\n if event_type in ('push', 'tag_push', 'Push Hook'):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(payload, user, repo, repo_url, project, event_type, codebase=codebase)\n elif event_type == 'merge_request':\n changes = self._process_merge_request_change(payload, project, event_type, codebase=codebase)\n else:\n changes = []\n if changes:\n log.msg('Received {} changes from {} gitlab event'.format(len(changes), event_type))\n return (changes, 'git')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "GitLabHandler", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GitLabHandler:\n\n def _process_change(self, payload, user, repo, repo_url, project, event, codebase=None):\n \"\"\"Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n <|body_0|>\n\n def _process_merge_request_change(self, payload, project, event, codebase=None):\n \"\"\"Consumes the merge_request JSON as a python object and turn it into a buildbot change. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n <|body_1|>\n\n def getChanges(self, request):\n \"\"\"Reponds only to POST events and starts the build process :arguments: request the http request object\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n changes = []\n refname = payload['ref']\n match = re.match('^refs/(heads|tags)/(.+)$', refname)\n if not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\n branch = match.group(2)\n if payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\n for commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' % (commit['id'],))\n continue\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n when_timestamp = dateparse(commit['timestamp'])\n log.msg('New revision: %s' % commit['id'][:8])\n change = {'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': files, 'comments': commit['message'], 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': branch, 'revlink': commit['url'], 'repository': repo_url, 'project': project, 'category': event, 'properties': {'event': event}}\n if codebase is not None:\n change['codebase'] = codebase\n changes.append(change)\n return changes\n<|end_body_0|>\n\n<|body_start_1|>\n attrs = payload['object_attributes']\n commit = attrs['last_commit']\n when_timestamp = dateparse(commit['timestamp'])\n repo_url = attrs['source']['git_http_url']\n changes = [{'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': [], 'comments': 'MR#{}: {}\\n\\n{}'.format(attrs['iid'], attrs['title'], attrs['description']), 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': attrs['source_branch'], 'repository': repo_url, 'project': project, 'category': event, 'revlink': attrs['url'], 'properties': {'target_branch': attrs['target_branch'], 'target_repository': attrs['target']['git_http_url'], 'event': event}}]\n if codebase is not None:\n changes[0]['codebase'] = codebase\n return changes\n<|end_body_1|>\n\n<|body_start_2|>\n expected_secret = isinstance(self.options, dict) and self.options.get('secret')\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n if received_secret != expected_secret:\n raise ValueError('Invalid secret')\n try:\n payload = json.load(request.content)\n except Exception as e:\n raise ValueError('Error loading JSON: ' + str(e))\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2NativeString(event_type)\n event_type = payload.get('object_kind', event_type)\n project = request.args.get('project', [''])[0]\n codebase = request.args.get('codebase', [None])[0]\n if event_type in ('push', 'tag_push', 'Push Hook'):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(payload, user, repo, repo_url, project, event_type, codebase=codebase)\n elif event_type == 'merge_request':\n changes = self._process_merge_request_change(payload, project, event_type, codebase=codebase)\n else:\n changes = []\n if changes:\n log.msg('Received {} changes from {} gitlab event'.format(len(changes), event_type))\n return (changes, 'git')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000347", "length_bytes": 6413, "license_type": "permissive", "methods": [{"docstring": "Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.", "name": "_process_change", "signature": "def _process_change(self, payload, user, repo, repo_url, project, event, codebase=None)"}, {"docstring": "Consumes the merge_request JSON as a python object and turn it into a buildbot change. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.", "name": "_process_merge_request_change", "signature": "def _process_merge_request_change(self, payload, project, event, codebase=None)"}, {"docstring": "Reponds only to POST events and starts the build process :arguments: request the http request object", "name": "getChanges", "signature": "def getChanges(self, request)"}], "n_methods": 3, "prompt": "Implement the Python class `GitLabHandler` described below.\n\nClass description:\nImplement the GitLabHandler class.\n\nMethod signatures and docstrings:\n- def _process_change(self, payload, user, repo, repo_url, project, event, codebase=None): Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\n- def _process_merge_request_change(self, payload, project, event, codebase=None): Consumes the merge_request JSON as a python object and turn it into a buildbot change. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\n- def getChanges(self, request): Reponds only to POST events and starts the build process :arguments: request the http request object", "prompted_full_text": "Implement the Python class `GitLabHandler` described below.\n\nClass description:\nImplement the GitLabHandler class.\n\nMethod signatures and docstrings:\n- def _process_change(self, payload, user, repo, repo_url, project, event, codebase=None): Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\n- def _process_merge_request_change(self, payload, project, event, codebase=None): Consumes the merge_request JSON as a python object and turn it into a buildbot change. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\n- def getChanges(self, request): Reponds only to POST events and starts the build process :arguments: request the http request object\n\n<|skeleton|>\nclass GitLabHandler:\n\n def _process_change(self, payload, user, repo, repo_url, project, event, codebase=None):\n \"\"\"Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n <|body_0|>\n\n def _process_merge_request_change(self, payload, project, event, codebase=None):\n \"\"\"Consumes the merge_request JSON as a python object and turn it into a buildbot change. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n <|body_1|>\n\n def getChanges(self, request):\n \"\"\"Reponds only to POST events and starts the build process :arguments: request the http request object\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n changes = []\n refname = payload['ref']\n match = re.match('^refs/(heads|tags)/(.+)$', refname)\n if not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\n branch = match.group(2)\n if payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\n for commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' % (commit['id'],))\n continue\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n when_timestamp = dateparse(commit['timestamp'])\n log.msg('New revision: %s' % commit['id'][:8])\n change = {'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': files, 'comments': commit['message'], 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': branch, 'revlink': commit['url'], 'repository': repo_url, 'project': project, 'category': event, 'properties': {'event': event}}\n if codebase is not None:\n change['codebase'] = codebase\n changes.append(change)\n return changes\n<|end_body_0|>\n\n<|body_start_1|>\n attrs = payload['object_attributes']\n commit = attrs['last_commit']\n when_timestamp = dateparse(commit['timestamp'])\n repo_url = attrs['source']['git_http_url']\n changes = [{'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': [], 'comments': 'MR#{}: {}\\n\\n{}'.format(attrs['iid'], attrs['title'], attrs['description']), 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': attrs['source_branch'], 'repository': repo_url, 'project': project, 'category': event, 'revlink': attrs['url'], 'properties': {'target_branch': attrs['target_branch'], 'target_repository': attrs['target']['git_http_url'], 'event': event}}]\n if codebase is not None:\n changes[0]['codebase'] = codebase\n return changes\n<|end_body_1|>\n\n<|body_start_2|>\n expected_secret = isinstance(self.options, dict) and self.options.get('secret')\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n if received_secret != expected_secret:\n raise ValueError('Invalid secret')\n try:\n payload = json.load(request.content)\n except Exception as e:\n raise ValueError('Error loading JSON: ' + str(e))\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2NativeString(event_type)\n event_type = payload.get('object_kind', event_type)\n project = request.args.get('project', [''])[0]\n codebase = request.args.get('codebase', [None])[0]\n if event_type in ('push', 'tag_push', 'Push Hook'):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(payload, user, repo, repo_url, project, event_type, codebase=codebase)\n elif event_type == 'merge_request':\n changes = self._process_merge_request_change(payload, project, event_type, codebase=codebase)\n else:\n changes = []\n if changes:\n log.msg('Received {} changes from {} gitlab event'.format(len(changes), event_type))\n return (changes, 'git')\n<|end_body_2|>\n", "revision_id": "09e8e488edfb7e46cf5867b2b5a6ebe0b1929f78", "skeleton": "<|skeleton|>\nclass GitLabHandler:\n\n def _process_change(self, payload, user, repo, repo_url, project, event, codebase=None):\n \"\"\"Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n <|body_0|>\n\n def _process_merge_request_change(self, payload, project, event, codebase=None):\n \"\"\"Consumes the merge_request JSON as a python object and turn it into a buildbot change. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n <|body_1|>\n\n def getChanges(self, request):\n \"\"\"Reponds only to POST events and starts the build process :arguments: request the http request object\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GitLabHandler:\n def _process_change(self, payload, user, repo, repo_url, project, event, codebase=None):\n \"\"\"Consumes the JSON as a python object and actually starts the build. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n changes = []\n refname = payload['ref']\n match = re.match('^refs/(heads|tags)/(.+)$', refname)\n if not match:\n log.msg(\"Ignoring refname `%s': Not a branch\" % refname)\n return changes\n branch = match.group(2)\n if payload.get('deleted'):\n log.msg(\"Branch `%s' deleted, ignoring\" % branch)\n return changes\n for commit in payload['commits']:\n if not commit.get('distinct', True):\n log.msg('Commit `%s` is a non-distinct commit, ignoring...' % (commit['id'],))\n continue\n files = []\n for kind in ('added', 'modified', 'removed'):\n files.extend(commit.get(kind, []))\n when_timestamp = dateparse(commit['timestamp'])\n log.msg('New revision: %s' % commit['id'][:8])\n change = {'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': files, 'comments': commit['message'], 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': branch, 'revlink': commit['url'], 'repository': repo_url, 'project': project, 'category': event, 'properties': {'event': event}}\n if codebase is not None:\n change['codebase'] = codebase\n changes.append(change)\n return changes\n\n def _process_merge_request_change(self, payload, project, event, codebase=None):\n \"\"\"Consumes the merge_request JSON as a python object and turn it into a buildbot change. :arguments: payload Python Object that represents the JSON sent by GitLab Service Hook.\"\"\"\n attrs = payload['object_attributes']\n commit = attrs['last_commit']\n when_timestamp = dateparse(commit['timestamp'])\n repo_url = attrs['source']['git_http_url']\n changes = [{'author': '%s <%s>' % (commit['author']['name'], commit['author']['email']), 'files': [], 'comments': 'MR#{}: {}\\n\\n{}'.format(attrs['iid'], attrs['title'], attrs['description']), 'revision': commit['id'], 'when_timestamp': when_timestamp, 'branch': attrs['source_branch'], 'repository': repo_url, 'project': project, 'category': event, 'revlink': attrs['url'], 'properties': {'target_branch': attrs['target_branch'], 'target_repository': attrs['target']['git_http_url'], 'event': event}}]\n if codebase is not None:\n changes[0]['codebase'] = codebase\n return changes\n\n def getChanges(self, request):\n \"\"\"Reponds only to POST events and starts the build process :arguments: request the http request object\"\"\"\n expected_secret = isinstance(self.options, dict) and self.options.get('secret')\n if expected_secret:\n received_secret = request.getHeader(_HEADER_GITLAB_TOKEN)\n if received_secret != expected_secret:\n raise ValueError('Invalid secret')\n try:\n payload = json.load(request.content)\n except Exception as e:\n raise ValueError('Error loading JSON: ' + str(e))\n event_type = request.getHeader(_HEADER_EVENT)\n event_type = bytes2NativeString(event_type)\n event_type = payload.get('object_kind', event_type)\n project = request.args.get('project', [''])[0]\n codebase = request.args.get('codebase', [None])[0]\n if event_type in ('push', 'tag_push', 'Push Hook'):\n user = payload['user_name']\n repo = payload['repository']['name']\n repo_url = payload['repository']['url']\n changes = self._process_change(payload, user, repo, repo_url, project, event_type, codebase=codebase)\n elif event_type == 'merge_request':\n changes = self._process_merge_request_change(payload, project, event_type, codebase=codebase)\n else:\n changes = []\n if changes:\n log.msg('Received {} changes from {} gitlab event'.format(len(changes), event_type))\n return (changes, 'git')\n", "source": "the_stack_v2_python_sparse", "source_path": "bb-master/sandbox/lib/python3.5/site-packages/buildbot/www/hooks/gitlab.py", "source_repo": "Alecto3-D/testable-greeter", "split": "test", "star_events_count": 2} {"blob_id": "36e30795b5bbff7d51d39795f421f2d0cb57429a", "bodies": ["super().__init__(**kwargs)\nself._vocab_size = vocab_size\nself._num_mixtures = num_mixtures\nself._use_input_context_gate = use_input_context_gate\nself._use_output_context_gate = use_output_context_gate\nself._vocab_as_last_dim = vocab_as_last_dim\nself._normalizer_params = normalizer_params\nself._l2_regularizer = l2_regularizer\nif use_input_context_gate:\n self._input_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='input_context_gate')\nif use_output_context_gate:\n self._output_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='output_context_gate')\nself._gate_dense = layers.Dense(vocab_size * (num_mixtures + 1), activation=None, bias_initializer=None, kernel_regularizer=l2_regularizer, name='gate')\nself._expert_dense = layers.Dense(vocab_size * num_mixtures, activation=None, kernel_regularizer=l2_regularizer, name='expert')", "if self._use_input_context_gate:\n inputs = self._input_context_gate(inputs)\ngate_activations = self._gate_dense(inputs)\nexpert_activations = self._expert_dense(inputs)\nif self._vocab_as_last_dim:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1, self._vocab_size])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures, self._vocab_size])\nelse:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures])\ngating_distribution = tf.nn.softmax(gate_activations, axis=1)\nexpert_distribution = tf.nn.sigmoid(expert_activations)\nfinal_probabilities = tf.reduce_sum(gating_distribution[:, :self._num_mixtures] * expert_distribution, axis=1)\nif not self._vocab_as_last_dim:\n final_probabilities = tf.reshape(final_probabilities, [-1, self._vocab_size])\nreturn {'predictions': final_probabilities}"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n self._vocab_size = vocab_size\n self._num_mixtures = num_mixtures\n self._use_input_context_gate = use_input_context_gate\n self._use_output_context_gate = use_output_context_gate\n self._vocab_as_last_dim = vocab_as_last_dim\n self._normalizer_params = normalizer_params\n self._l2_regularizer = l2_regularizer\n if use_input_context_gate:\n self._input_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='input_context_gate')\n if use_output_context_gate:\n self._output_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='output_context_gate')\n self._gate_dense = layers.Dense(vocab_size * (num_mixtures + 1), activation=None, bias_initializer=None, kernel_regularizer=l2_regularizer, name='gate')\n self._expert_dense = layers.Dense(vocab_size * num_mixtures, activation=None, kernel_regularizer=l2_regularizer, name='expert')\n<|end_body_0|>\n\n<|body_start_1|>\n if self._use_input_context_gate:\n inputs = self._input_context_gate(inputs)\n gate_activations = self._gate_dense(inputs)\n expert_activations = self._expert_dense(inputs)\n if self._vocab_as_last_dim:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1, self._vocab_size])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures, self._vocab_size])\n else:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures])\n gating_distribution = tf.nn.softmax(gate_activations, axis=1)\n expert_distribution = tf.nn.sigmoid(expert_activations)\n final_probabilities = tf.reduce_sum(gating_distribution[:, :self._num_mixtures] * expert_distribution, axis=1)\n if not self._vocab_as_last_dim:\n final_probabilities = tf.reshape(final_probabilities, [-1, self._vocab_size])\n return {'predictions': final_probabilities}\n<|end_body_1|>\n", "class_docstring": "A softmax over a mixture of logistic models (with L2 regularization).", "class_name": "MoeModel", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MoeModel:\n \"\"\"A softmax over a mixture of logistic models (with L2 regularization).\"\"\"\n\n def __init__(self, vocab_size: int=3862, num_mixtures: int=2, use_input_context_gate: bool=False, use_output_context_gate: bool=False, normalizer_params: Optional[dict[str, Any]]=None, vocab_as_last_dim: bool=False, l2_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, **kwargs):\n \"\"\"Creates a Mixture of (Logistic) Experts model. The model consists of a per-class softmax distribution over a configurable number of logistic classifiers. One of the classifiers in the mixture is not trained, and always predicts 0. Args: vocab_size: The number of classes in the dataset. num_mixtures: The number of mixtures (excluding a dummy 'expert' that always predicts the non-existence of an entity). use_input_context_gate: if True apply context gate layer to the input. use_output_context_gate: if True apply context gate layer to the output. normalizer_params: parameters of the batch normalization. vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as the last dimension\"\"\"\n <|body_0|>\n\n def call(self, inputs: tf.Tensor) -> dict[str, tf.Tensor]:\n \"\"\"MoE forward call. Args: inputs: 'batch_size' x 'num_features' matrix of input features. Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are batch_size x num_classes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self._vocab_size = vocab_size\n self._num_mixtures = num_mixtures\n self._use_input_context_gate = use_input_context_gate\n self._use_output_context_gate = use_output_context_gate\n self._vocab_as_last_dim = vocab_as_last_dim\n self._normalizer_params = normalizer_params\n self._l2_regularizer = l2_regularizer\n if use_input_context_gate:\n self._input_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='input_context_gate')\n if use_output_context_gate:\n self._output_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='output_context_gate')\n self._gate_dense = layers.Dense(vocab_size * (num_mixtures + 1), activation=None, bias_initializer=None, kernel_regularizer=l2_regularizer, name='gate')\n self._expert_dense = layers.Dense(vocab_size * num_mixtures, activation=None, kernel_regularizer=l2_regularizer, name='expert')\n<|end_body_0|>\n\n<|body_start_1|>\n if self._use_input_context_gate:\n inputs = self._input_context_gate(inputs)\n gate_activations = self._gate_dense(inputs)\n expert_activations = self._expert_dense(inputs)\n if self._vocab_as_last_dim:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1, self._vocab_size])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures, self._vocab_size])\n else:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures])\n gating_distribution = tf.nn.softmax(gate_activations, axis=1)\n expert_distribution = tf.nn.sigmoid(expert_activations)\n final_probabilities = tf.reduce_sum(gating_distribution[:, :self._num_mixtures] * expert_distribution, axis=1)\n if not self._vocab_as_last_dim:\n final_probabilities = tf.reshape(final_probabilities, [-1, self._vocab_size])\n return {'predictions': final_probabilities}\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000348", "length_bytes": 5262, "license_type": "permissive", "methods": [{"docstring": "Creates a Mixture of (Logistic) Experts model. The model consists of a per-class softmax distribution over a configurable number of logistic classifiers. One of the classifiers in the mixture is not trained, and always predicts 0. Args: vocab_size: The number of classes in the dataset. num_mixtures: The number of mixtures (excluding a dummy 'expert' that always predicts the non-existence of an entity). use_input_context_gate: if True apply context gate layer to the input. use_output_context_gate: if True apply context gate layer to the output. normalizer_params: parameters of the batch normalization. vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as the last dimension", "name": "__init__", "signature": "def __init__(self, vocab_size: int=3862, num_mixtures: int=2, use_input_context_gate: bool=False, use_output_context_gate: bool=False, normalizer_params: Optional[dict[str, Any]]=None, vocab_as_last_dim: bool=False, l2_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, **kwargs)"}, {"docstring": "MoE forward call. Args: inputs: 'batch_size' x 'num_features' matrix of input features. Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are batch_size x num_classes.", "name": "call", "signature": "def call(self, inputs: tf.Tensor) -> dict[str, tf.Tensor]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031571", "prompt": "Implement the Python class `MoeModel` described below.\n\nClass description:\nA softmax over a mixture of logistic models (with L2 regularization).\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size: int=3862, num_mixtures: int=2, use_input_context_gate: bool=False, use_output_context_gate: bool=False, normalizer_params: Optional[dict[str, Any]]=None, vocab_as_last_dim: bool=False, l2_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, **kwargs): Creates a Mixture of (Logistic) Experts model. The model consists of a per-class softmax distribution over a configurable number of logistic classifiers. One of the classifiers in the mixture is not trained, and always predicts 0. Args: vocab_size: The number of classes in the dataset. num_mixtures: The number of mixtures (excluding a dummy 'expert' that always predicts the non-existence of an entity). use_input_context_gate: if True apply context gate layer to the input. use_output_context_gate: if True apply context gate layer to the output. normalizer_params: parameters of the batch normalization. vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as the last dimension\n- def call(self, inputs: tf.Tensor) -> dict[str, tf.Tensor]: MoE forward call. Args: inputs: 'batch_size' x 'num_features' matrix of input features. Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are batch_size x num_classes.", "prompted_full_text": "Implement the Python class `MoeModel` described below.\n\nClass description:\nA softmax over a mixture of logistic models (with L2 regularization).\n\nMethod signatures and docstrings:\n- def __init__(self, vocab_size: int=3862, num_mixtures: int=2, use_input_context_gate: bool=False, use_output_context_gate: bool=False, normalizer_params: Optional[dict[str, Any]]=None, vocab_as_last_dim: bool=False, l2_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, **kwargs): Creates a Mixture of (Logistic) Experts model. The model consists of a per-class softmax distribution over a configurable number of logistic classifiers. One of the classifiers in the mixture is not trained, and always predicts 0. Args: vocab_size: The number of classes in the dataset. num_mixtures: The number of mixtures (excluding a dummy 'expert' that always predicts the non-existence of an entity). use_input_context_gate: if True apply context gate layer to the input. use_output_context_gate: if True apply context gate layer to the output. normalizer_params: parameters of the batch normalization. vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as the last dimension\n- def call(self, inputs: tf.Tensor) -> dict[str, tf.Tensor]: MoE forward call. Args: inputs: 'batch_size' x 'num_features' matrix of input features. Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are batch_size x num_classes.\n\n<|skeleton|>\nclass MoeModel:\n \"\"\"A softmax over a mixture of logistic models (with L2 regularization).\"\"\"\n\n def __init__(self, vocab_size: int=3862, num_mixtures: int=2, use_input_context_gate: bool=False, use_output_context_gate: bool=False, normalizer_params: Optional[dict[str, Any]]=None, vocab_as_last_dim: bool=False, l2_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, **kwargs):\n \"\"\"Creates a Mixture of (Logistic) Experts model. The model consists of a per-class softmax distribution over a configurable number of logistic classifiers. One of the classifiers in the mixture is not trained, and always predicts 0. Args: vocab_size: The number of classes in the dataset. num_mixtures: The number of mixtures (excluding a dummy 'expert' that always predicts the non-existence of an entity). use_input_context_gate: if True apply context gate layer to the input. use_output_context_gate: if True apply context gate layer to the output. normalizer_params: parameters of the batch normalization. vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as the last dimension\"\"\"\n <|body_0|>\n\n def call(self, inputs: tf.Tensor) -> dict[str, tf.Tensor]:\n \"\"\"MoE forward call. Args: inputs: 'batch_size' x 'num_features' matrix of input features. Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are batch_size x num_classes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n self._vocab_size = vocab_size\n self._num_mixtures = num_mixtures\n self._use_input_context_gate = use_input_context_gate\n self._use_output_context_gate = use_output_context_gate\n self._vocab_as_last_dim = vocab_as_last_dim\n self._normalizer_params = normalizer_params\n self._l2_regularizer = l2_regularizer\n if use_input_context_gate:\n self._input_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='input_context_gate')\n if use_output_context_gate:\n self._output_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='output_context_gate')\n self._gate_dense = layers.Dense(vocab_size * (num_mixtures + 1), activation=None, bias_initializer=None, kernel_regularizer=l2_regularizer, name='gate')\n self._expert_dense = layers.Dense(vocab_size * num_mixtures, activation=None, kernel_regularizer=l2_regularizer, name='expert')\n<|end_body_0|>\n\n<|body_start_1|>\n if self._use_input_context_gate:\n inputs = self._input_context_gate(inputs)\n gate_activations = self._gate_dense(inputs)\n expert_activations = self._expert_dense(inputs)\n if self._vocab_as_last_dim:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1, self._vocab_size])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures, self._vocab_size])\n else:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures])\n gating_distribution = tf.nn.softmax(gate_activations, axis=1)\n expert_distribution = tf.nn.sigmoid(expert_activations)\n final_probabilities = tf.reduce_sum(gating_distribution[:, :self._num_mixtures] * expert_distribution, axis=1)\n if not self._vocab_as_last_dim:\n final_probabilities = tf.reshape(final_probabilities, [-1, self._vocab_size])\n return {'predictions': final_probabilities}\n<|end_body_1|>\n", "revision_id": "9c78e7f643231c0fc9cc64c991d2b8d5628b9668", "skeleton": "<|skeleton|>\nclass MoeModel:\n \"\"\"A softmax over a mixture of logistic models (with L2 regularization).\"\"\"\n\n def __init__(self, vocab_size: int=3862, num_mixtures: int=2, use_input_context_gate: bool=False, use_output_context_gate: bool=False, normalizer_params: Optional[dict[str, Any]]=None, vocab_as_last_dim: bool=False, l2_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, **kwargs):\n \"\"\"Creates a Mixture of (Logistic) Experts model. The model consists of a per-class softmax distribution over a configurable number of logistic classifiers. One of the classifiers in the mixture is not trained, and always predicts 0. Args: vocab_size: The number of classes in the dataset. num_mixtures: The number of mixtures (excluding a dummy 'expert' that always predicts the non-existence of an entity). use_input_context_gate: if True apply context gate layer to the input. use_output_context_gate: if True apply context gate layer to the output. normalizer_params: parameters of the batch normalization. vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as the last dimension\"\"\"\n <|body_0|>\n\n def call(self, inputs: tf.Tensor) -> dict[str, tf.Tensor]:\n \"\"\"MoE forward call. Args: inputs: 'batch_size' x 'num_features' matrix of input features. Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are batch_size x num_classes.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MoeModel:\n \"\"\"A softmax over a mixture of logistic models (with L2 regularization).\"\"\"\n\n def __init__(self, vocab_size: int=3862, num_mixtures: int=2, use_input_context_gate: bool=False, use_output_context_gate: bool=False, normalizer_params: Optional[dict[str, Any]]=None, vocab_as_last_dim: bool=False, l2_regularizer: Optional[tf.keras.regularizers.Regularizer]=None, **kwargs):\n \"\"\"Creates a Mixture of (Logistic) Experts model. The model consists of a per-class softmax distribution over a configurable number of logistic classifiers. One of the classifiers in the mixture is not trained, and always predicts 0. Args: vocab_size: The number of classes in the dataset. num_mixtures: The number of mixtures (excluding a dummy 'expert' that always predicts the non-existence of an entity). use_input_context_gate: if True apply context gate layer to the input. use_output_context_gate: if True apply context gate layer to the output. normalizer_params: parameters of the batch normalization. vocab_as_last_dim: if True reshape `activations` and make `vocab_size` as the last dimension\"\"\"\n super().__init__(**kwargs)\n self._vocab_size = vocab_size\n self._num_mixtures = num_mixtures\n self._use_input_context_gate = use_input_context_gate\n self._use_output_context_gate = use_output_context_gate\n self._vocab_as_last_dim = vocab_as_last_dim\n self._normalizer_params = normalizer_params\n self._l2_regularizer = l2_regularizer\n if use_input_context_gate:\n self._input_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='input_context_gate')\n if use_output_context_gate:\n self._output_context_gate = nn_layers.ContextGate(normalizer_fn=layers.BatchNormalization, normalizer_params=normalizer_params, name='output_context_gate')\n self._gate_dense = layers.Dense(vocab_size * (num_mixtures + 1), activation=None, bias_initializer=None, kernel_regularizer=l2_regularizer, name='gate')\n self._expert_dense = layers.Dense(vocab_size * num_mixtures, activation=None, kernel_regularizer=l2_regularizer, name='expert')\n\n def call(self, inputs: tf.Tensor) -> dict[str, tf.Tensor]:\n \"\"\"MoE forward call. Args: inputs: 'batch_size' x 'num_features' matrix of input features. Returns: A dictionary with a tensor containing the probability predictions of the model in the 'predictions' key. The dimensions of the tensor are batch_size x num_classes.\"\"\"\n if self._use_input_context_gate:\n inputs = self._input_context_gate(inputs)\n gate_activations = self._gate_dense(inputs)\n expert_activations = self._expert_dense(inputs)\n if self._vocab_as_last_dim:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1, self._vocab_size])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures, self._vocab_size])\n else:\n gate_activations = tf.reshape(gate_activations, [-1, self._num_mixtures + 1])\n expert_activations = tf.reshape(expert_activations, [-1, self._num_mixtures])\n gating_distribution = tf.nn.softmax(gate_activations, axis=1)\n expert_distribution = tf.nn.sigmoid(expert_activations)\n final_probabilities = tf.reduce_sum(gating_distribution[:, :self._num_mixtures] * expert_distribution, axis=1)\n if not self._vocab_as_last_dim:\n final_probabilities = tf.reshape(final_probabilities, [-1, self._vocab_size])\n return {'predictions': final_probabilities}\n", "source": "the_stack_v2_python_sparse", "source_path": "official/projects/yt8m/modeling/heads/moe.py", "source_repo": "tensorflow/models", "split": "test", "star_events_count": 86763} {"blob_id": "9c4bb1ab64b3d3e0cbcc036288cbd4a5e986c27d", "bodies": ["try:\n response = Database.Table.get_item(Key={'DocumentID': document_id})\nexcept Exception as e:\n response = None\n Logger.info(f'Database GetDocument : document_id = {document_id} : exception = {e}')\nif response and response.get('Item') and (response['ResponseMetadata']['HTTPStatusCode'] == 200):\n return Document.from_dict(response['Item'])\nelse:\n return None", "for stage in stages:\n for state in states:\n response = Database.Table.query(IndexName=INDEX_PROGRESS, KeyConditionExpression=Key('StageState').eq(f'{stage}{HASH}{state}'.title()))\n for item in response['Items']:\n yield Document.from_dict(item)\nreturn []", "response = Database.Table.put_item(Item=document.to_dict())\nLogger.info(f'Database.PutDocument : DocumentID = {document.DocumentID}')\nreturn PASS if response['ResponseMetadata']['HTTPStatusCode'] == 200 else FAIL", "for documentToUpdate in Database.GetDocuments([currentStage], [State.SUCCESS]):\n Logger.info(f'Moving {documentToUpdate} stage to {nextStage}')\n documentToUpdate.Stage = nextStage\n documentToUpdate.State = State.WAITING\n Database.PutDocument(documentToUpdate)"], "bodies_text": "<|body_start_0|>\n try:\n response = Database.Table.get_item(Key={'DocumentID': document_id})\n except Exception as e:\n response = None\n Logger.info(f'Database GetDocument : document_id = {document_id} : exception = {e}')\n if response and response.get('Item') and (response['ResponseMetadata']['HTTPStatusCode'] == 200):\n return Document.from_dict(response['Item'])\n else:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n for stage in stages:\n for state in states:\n response = Database.Table.query(IndexName=INDEX_PROGRESS, KeyConditionExpression=Key('StageState').eq(f'{stage}{HASH}{state}'.title()))\n for item in response['Items']:\n yield Document.from_dict(item)\n return []\n<|end_body_1|>\n\n<|body_start_2|>\n response = Database.Table.put_item(Item=document.to_dict())\n Logger.info(f'Database.PutDocument : DocumentID = {document.DocumentID}')\n return PASS if response['ResponseMetadata']['HTTPStatusCode'] == 200 else FAIL\n<|end_body_2|>\n\n<|body_start_3|>\n for documentToUpdate in Database.GetDocuments([currentStage], [State.SUCCESS]):\n Logger.info(f'Moving {documentToUpdate} stage to {nextStage}')\n documentToUpdate.Stage = nextStage\n documentToUpdate.State = State.WAITING\n Database.PutDocument(documentToUpdate)\n<|end_body_3|>\n", "class_docstring": "Database Abstraction Layer", "class_name": "Database", "detected_licenses": ["Apache-2.0", "MIT-0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Database:\n \"\"\"Database Abstraction Layer\"\"\"\n\n def GetDocument(document_id: str) -> Document:\n \"\"\"Fetch a specific document\"\"\"\n <|body_0|>\n\n def GetDocuments(stages: List[Stage], states: List[State]) -> List[Document]:\n \"\"\"Fetch a specific document set\"\"\"\n <|body_1|>\n\n def PutDocument(document: Document) -> Document:\n \"\"\"Update a specific document\"\"\"\n <|body_2|>\n\n def PromoteDocument(currentStage: Stage, nextStage: Stage):\n \"\"\"Promote documents in SUCCESS state from current stage to next stage WAITING state\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n response = Database.Table.get_item(Key={'DocumentID': document_id})\n except Exception as e:\n response = None\n Logger.info(f'Database GetDocument : document_id = {document_id} : exception = {e}')\n if response and response.get('Item') and (response['ResponseMetadata']['HTTPStatusCode'] == 200):\n return Document.from_dict(response['Item'])\n else:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n for stage in stages:\n for state in states:\n response = Database.Table.query(IndexName=INDEX_PROGRESS, KeyConditionExpression=Key('StageState').eq(f'{stage}{HASH}{state}'.title()))\n for item in response['Items']:\n yield Document.from_dict(item)\n return []\n<|end_body_1|>\n\n<|body_start_2|>\n response = Database.Table.put_item(Item=document.to_dict())\n Logger.info(f'Database.PutDocument : DocumentID = {document.DocumentID}')\n return PASS if response['ResponseMetadata']['HTTPStatusCode'] == 200 else FAIL\n<|end_body_2|>\n\n<|body_start_3|>\n for documentToUpdate in Database.GetDocuments([currentStage], [State.SUCCESS]):\n Logger.info(f'Moving {documentToUpdate} stage to {nextStage}')\n documentToUpdate.Stage = nextStage\n documentToUpdate.State = State.WAITING\n Database.PutDocument(documentToUpdate)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000349", "length_bytes": 3107, "license_type": "permissive", "methods": [{"docstring": "Fetch a specific document", "name": "GetDocument", "signature": "def GetDocument(document_id: str) -> Document"}, {"docstring": "Fetch a specific document set", "name": "GetDocuments", "signature": "def GetDocuments(stages: List[Stage], states: List[State]) -> List[Document]"}, {"docstring": "Update a specific document", "name": "PutDocument", "signature": "def PutDocument(document: Document) -> Document"}, {"docstring": "Promote documents in SUCCESS state from current stage to next stage WAITING state", "name": "PromoteDocument", "signature": "def PromoteDocument(currentStage: Stage, nextStage: Stage)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_007592", "prompt": "Implement the Python class `Database` described below.\n\nClass description:\nDatabase Abstraction Layer\n\nMethod signatures and docstrings:\n- def GetDocument(document_id: str) -> Document: Fetch a specific document\n- def GetDocuments(stages: List[Stage], states: List[State]) -> List[Document]: Fetch a specific document set\n- def PutDocument(document: Document) -> Document: Update a specific document\n- def PromoteDocument(currentStage: Stage, nextStage: Stage): Promote documents in SUCCESS state from current stage to next stage WAITING state", "prompted_full_text": "Implement the Python class `Database` described below.\n\nClass description:\nDatabase Abstraction Layer\n\nMethod signatures and docstrings:\n- def GetDocument(document_id: str) -> Document: Fetch a specific document\n- def GetDocuments(stages: List[Stage], states: List[State]) -> List[Document]: Fetch a specific document set\n- def PutDocument(document: Document) -> Document: Update a specific document\n- def PromoteDocument(currentStage: Stage, nextStage: Stage): Promote documents in SUCCESS state from current stage to next stage WAITING state\n\n<|skeleton|>\nclass Database:\n \"\"\"Database Abstraction Layer\"\"\"\n\n def GetDocument(document_id: str) -> Document:\n \"\"\"Fetch a specific document\"\"\"\n <|body_0|>\n\n def GetDocuments(stages: List[Stage], states: List[State]) -> List[Document]:\n \"\"\"Fetch a specific document set\"\"\"\n <|body_1|>\n\n def PutDocument(document: Document) -> Document:\n \"\"\"Update a specific document\"\"\"\n <|body_2|>\n\n def PromoteDocument(currentStage: Stage, nextStage: Stage):\n \"\"\"Promote documents in SUCCESS state from current stage to next stage WAITING state\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n response = Database.Table.get_item(Key={'DocumentID': document_id})\n except Exception as e:\n response = None\n Logger.info(f'Database GetDocument : document_id = {document_id} : exception = {e}')\n if response and response.get('Item') and (response['ResponseMetadata']['HTTPStatusCode'] == 200):\n return Document.from_dict(response['Item'])\n else:\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n for stage in stages:\n for state in states:\n response = Database.Table.query(IndexName=INDEX_PROGRESS, KeyConditionExpression=Key('StageState').eq(f'{stage}{HASH}{state}'.title()))\n for item in response['Items']:\n yield Document.from_dict(item)\n return []\n<|end_body_1|>\n\n<|body_start_2|>\n response = Database.Table.put_item(Item=document.to_dict())\n Logger.info(f'Database.PutDocument : DocumentID = {document.DocumentID}')\n return PASS if response['ResponseMetadata']['HTTPStatusCode'] == 200 else FAIL\n<|end_body_2|>\n\n<|body_start_3|>\n for documentToUpdate in Database.GetDocuments([currentStage], [State.SUCCESS]):\n Logger.info(f'Moving {documentToUpdate} stage to {nextStage}')\n documentToUpdate.Stage = nextStage\n documentToUpdate.State = State.WAITING\n Database.PutDocument(documentToUpdate)\n<|end_body_3|>\n", "revision_id": "633e6291eea95a3933d34cae53b68cf6570b9bbb", "skeleton": "<|skeleton|>\nclass Database:\n \"\"\"Database Abstraction Layer\"\"\"\n\n def GetDocument(document_id: str) -> Document:\n \"\"\"Fetch a specific document\"\"\"\n <|body_0|>\n\n def GetDocuments(stages: List[Stage], states: List[State]) -> List[Document]:\n \"\"\"Fetch a specific document set\"\"\"\n <|body_1|>\n\n def PutDocument(document: Document) -> Document:\n \"\"\"Update a specific document\"\"\"\n <|body_2|>\n\n def PromoteDocument(currentStage: Stage, nextStage: Stage):\n \"\"\"Promote documents in SUCCESS state from current stage to next stage WAITING state\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Database:\n \"\"\"Database Abstraction Layer\"\"\"\n\n def GetDocument(document_id: str) -> Document:\n \"\"\"Fetch a specific document\"\"\"\n try:\n response = Database.Table.get_item(Key={'DocumentID': document_id})\n except Exception as e:\n response = None\n Logger.info(f'Database GetDocument : document_id = {document_id} : exception = {e}')\n if response and response.get('Item') and (response['ResponseMetadata']['HTTPStatusCode'] == 200):\n return Document.from_dict(response['Item'])\n else:\n return None\n\n def GetDocuments(stages: List[Stage], states: List[State]) -> List[Document]:\n \"\"\"Fetch a specific document set\"\"\"\n for stage in stages:\n for state in states:\n response = Database.Table.query(IndexName=INDEX_PROGRESS, KeyConditionExpression=Key('StageState').eq(f'{stage}{HASH}{state}'.title()))\n for item in response['Items']:\n yield Document.from_dict(item)\n return []\n\n def PutDocument(document: Document) -> Document:\n \"\"\"Update a specific document\"\"\"\n response = Database.Table.put_item(Item=document.to_dict())\n Logger.info(f'Database.PutDocument : DocumentID = {document.DocumentID}')\n return PASS if response['ResponseMetadata']['HTTPStatusCode'] == 200 else FAIL\n\n def PromoteDocument(currentStage: Stage, nextStage: Stage):\n \"\"\"Promote documents in SUCCESS state from current stage to next stage WAITING state\"\"\"\n for documentToUpdate in Database.GetDocuments([currentStage], [State.SUCCESS]):\n Logger.info(f'Moving {documentToUpdate} stage to {nextStage}')\n documentToUpdate.Stage = nextStage\n documentToUpdate.State = State.WAITING\n Database.PutDocument(documentToUpdate)\n", "source": "the_stack_v2_python_sparse", "source_path": "source/lambdas/shared/database.py", "source_repo": "jhasatis/TabularDocumentDigitization", "split": "test", "star_events_count": 0} {"blob_id": "bfeb487e358e3f6741106b6cc291f1e178eb5aa1", "bodies": ["if type(sample) == tuple:\n image, noise_image = sample\n return (self.normalize(image), self.normalize(noise_image))\nelse:\n return self.normalize(sample)", "image = image.transpose((2, 0, 1))\nimage = image.astype(np.float32) / 255.0\nreturn image"], "bodies_text": "<|body_start_0|>\n if type(sample) == tuple:\n image, noise_image = sample\n return (self.normalize(image), self.normalize(noise_image))\n else:\n return self.normalize(sample)\n<|end_body_0|>\n\n<|body_start_1|>\n image = image.transpose((2, 0, 1))\n image = image.astype(np.float32) / 255.0\n return image\n<|end_body_1|>\n", "class_docstring": "Convert ndarray to Tensor.", "class_name": "ToTensor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ToTensor:\n \"\"\"Convert ndarray to Tensor.\"\"\"\n\n def __call__(self, sample):\n \"\"\"@param sample (tuple|ndarray): numpy image|images @return tensor image\"\"\"\n <|body_0|>\n\n def normalize(self, image):\n \"\"\"@param image (numpy): image axis are H x W x C @return tensor of normalized image in range [0,1] with axis C X H X W\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if type(sample) == tuple:\n image, noise_image = sample\n return (self.normalize(image), self.normalize(noise_image))\n else:\n return self.normalize(sample)\n<|end_body_0|>\n\n<|body_start_1|>\n image = image.transpose((2, 0, 1))\n image = image.astype(np.float32) / 255.0\n return image\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000350", "length_bytes": 1672, "license_type": "no_license", "methods": [{"docstring": "@param sample (tuple|ndarray): numpy image|images @return tensor image", "name": "__call__", "signature": "def __call__(self, sample)"}, {"docstring": "@param image (numpy): image axis are H x W x C @return tensor of normalized image in range [0,1] with axis C X H X W", "name": "normalize", "signature": "def normalize(self, image)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_031385", "prompt": "Implement the Python class `ToTensor` described below.\n\nClass description:\nConvert ndarray to Tensor.\n\nMethod signatures and docstrings:\n- def __call__(self, sample): @param sample (tuple|ndarray): numpy image|images @return tensor image\n- def normalize(self, image): @param image (numpy): image axis are H x W x C @return tensor of normalized image in range [0,1] with axis C X H X W", "prompted_full_text": "Implement the Python class `ToTensor` described below.\n\nClass description:\nConvert ndarray to Tensor.\n\nMethod signatures and docstrings:\n- def __call__(self, sample): @param sample (tuple|ndarray): numpy image|images @return tensor image\n- def normalize(self, image): @param image (numpy): image axis are H x W x C @return tensor of normalized image in range [0,1] with axis C X H X W\n\n<|skeleton|>\nclass ToTensor:\n \"\"\"Convert ndarray to Tensor.\"\"\"\n\n def __call__(self, sample):\n \"\"\"@param sample (tuple|ndarray): numpy image|images @return tensor image\"\"\"\n <|body_0|>\n\n def normalize(self, image):\n \"\"\"@param image (numpy): image axis are H x W x C @return tensor of normalized image in range [0,1] with axis C X H X W\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if type(sample) == tuple:\n image, noise_image = sample\n return (self.normalize(image), self.normalize(noise_image))\n else:\n return self.normalize(sample)\n<|end_body_0|>\n\n<|body_start_1|>\n image = image.transpose((2, 0, 1))\n image = image.astype(np.float32) / 255.0\n return image\n<|end_body_1|>\n", "revision_id": "525f4ced839fe0176304c3feacd436962bcd8a0e", "skeleton": "<|skeleton|>\nclass ToTensor:\n \"\"\"Convert ndarray to Tensor.\"\"\"\n\n def __call__(self, sample):\n \"\"\"@param sample (tuple|ndarray): numpy image|images @return tensor image\"\"\"\n <|body_0|>\n\n def normalize(self, image):\n \"\"\"@param image (numpy): image axis are H x W x C @return tensor of normalized image in range [0,1] with axis C X H X W\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ToTensor:\n \"\"\"Convert ndarray to Tensor.\"\"\"\n\n def __call__(self, sample):\n \"\"\"@param sample (tuple|ndarray): numpy image|images @return tensor image\"\"\"\n if type(sample) == tuple:\n image, noise_image = sample\n return (self.normalize(image), self.normalize(noise_image))\n else:\n return self.normalize(sample)\n\n def normalize(self, image):\n \"\"\"@param image (numpy): image axis are H x W x C @return tensor of normalized image in range [0,1] with axis C X H X W\"\"\"\n image = image.transpose((2, 0, 1))\n image = image.astype(np.float32) / 255.0\n return image\n", "source": "the_stack_v2_python_sparse", "source_path": "backend/neural_network/data/transoformers.py", "source_repo": "jsz5/denoising_app", "split": "test", "star_events_count": 0} {"blob_id": "2779e52c6a1efaeadd4bb2177176091dc8fb1da7", "bodies": ["self.time_range = params_pre['time_range']\nself.plot_dir = params_pre['plot_dir']\nself.logger = logging.getLogger('simple')", "plotter = plot_ecei_timeslice(data_chunk)\ntidx_plot = [data_chunk.tb.time_to_idx(t) for t in self.time_range]\nif tidx_plot[0] is not None:\n self.logger.info(f'Plotting data into {self.plot_dir}. Plotting indices {tidx_plot[0]}, {tidx_plot[-1]}')\n for tidx in range(tidx_plot[0], tidx_plot[1]):\n fig = plotter.create_plot(data_chunk, tidx)\n fig.savefig(join(self.plot_dir, f'chunk_{data_chunk.tb.chunk_idx}_{tidx:04d}.png'))\nreturn data_chunk"], "bodies_text": "<|body_start_0|>\n self.time_range = params_pre['time_range']\n self.plot_dir = params_pre['plot_dir']\n self.logger = logging.getLogger('simple')\n<|end_body_0|>\n\n<|body_start_1|>\n plotter = plot_ecei_timeslice(data_chunk)\n tidx_plot = [data_chunk.tb.time_to_idx(t) for t in self.time_range]\n if tidx_plot[0] is not None:\n self.logger.info(f'Plotting data into {self.plot_dir}. Plotting indices {tidx_plot[0]}, {tidx_plot[-1]}')\n for tidx in range(tidx_plot[0], tidx_plot[1]):\n fig = plotter.create_plot(data_chunk, tidx)\n fig.savefig(join(self.plot_dir, f'chunk_{data_chunk.tb.chunk_idx}_{tidx:04d}.png'))\n return data_chunk\n<|end_body_1|>\n", "class_docstring": "Plots the pre-processed data and stores it to a file. Plots are made using the instantaneous data in the pipeline. That is, the position of the plot routine in the preprocessing pipeline is important.", "class_name": "pre_plot", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass pre_plot:\n \"\"\"Plots the pre-processed data and stores it to a file. Plots are made using the instantaneous data in the pipeline. That is, the position of the plot routine in the preprocessing pipeline is important.\"\"\"\n\n def __init__(self, params_pre):\n \"\"\"Instantiates the pre_plot class as a callable. Args: params_pre (dictionary): Preprocessing section of Delta configuration Returns: None\"\"\"\n <|body_0|>\n\n def process(self, data_chunk, executor=None):\n \"\"\"Plots the data chunk. Args: data_chunk (2d image): Data chunk to be wavelet transformed. executor (PEP-3148-style executor): Executor on which to execute. Returns: data_chunk (2d_image): Wavelet-filtered images\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time_range = params_pre['time_range']\n self.plot_dir = params_pre['plot_dir']\n self.logger = logging.getLogger('simple')\n<|end_body_0|>\n\n<|body_start_1|>\n plotter = plot_ecei_timeslice(data_chunk)\n tidx_plot = [data_chunk.tb.time_to_idx(t) for t in self.time_range]\n if tidx_plot[0] is not None:\n self.logger.info(f'Plotting data into {self.plot_dir}. Plotting indices {tidx_plot[0]}, {tidx_plot[-1]}')\n for tidx in range(tidx_plot[0], tidx_plot[1]):\n fig = plotter.create_plot(data_chunk, tidx)\n fig.savefig(join(self.plot_dir, f'chunk_{data_chunk.tb.chunk_idx}_{tidx:04d}.png'))\n return data_chunk\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000351", "length_bytes": 1831, "license_type": "no_license", "methods": [{"docstring": "Instantiates the pre_plot class as a callable. Args: params_pre (dictionary): Preprocessing section of Delta configuration Returns: None", "name": "__init__", "signature": "def __init__(self, params_pre)"}, {"docstring": "Plots the data chunk. Args: data_chunk (2d image): Data chunk to be wavelet transformed. executor (PEP-3148-style executor): Executor on which to execute. Returns: data_chunk (2d_image): Wavelet-filtered images", "name": "process", "signature": "def process(self, data_chunk, executor=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_040306", "prompt": "Implement the Python class `pre_plot` described below.\n\nClass description:\nPlots the pre-processed data and stores it to a file. Plots are made using the instantaneous data in the pipeline. That is, the position of the plot routine in the preprocessing pipeline is important.\n\nMethod signatures and docstrings:\n- def __init__(self, params_pre): Instantiates the pre_plot class as a callable. Args: params_pre (dictionary): Preprocessing section of Delta configuration Returns: None\n- def process(self, data_chunk, executor=None): Plots the data chunk. Args: data_chunk (2d image): Data chunk to be wavelet transformed. executor (PEP-3148-style executor): Executor on which to execute. Returns: data_chunk (2d_image): Wavelet-filtered images", "prompted_full_text": "Implement the Python class `pre_plot` described below.\n\nClass description:\nPlots the pre-processed data and stores it to a file. Plots are made using the instantaneous data in the pipeline. That is, the position of the plot routine in the preprocessing pipeline is important.\n\nMethod signatures and docstrings:\n- def __init__(self, params_pre): Instantiates the pre_plot class as a callable. Args: params_pre (dictionary): Preprocessing section of Delta configuration Returns: None\n- def process(self, data_chunk, executor=None): Plots the data chunk. Args: data_chunk (2d image): Data chunk to be wavelet transformed. executor (PEP-3148-style executor): Executor on which to execute. Returns: data_chunk (2d_image): Wavelet-filtered images\n\n<|skeleton|>\nclass pre_plot:\n \"\"\"Plots the pre-processed data and stores it to a file. Plots are made using the instantaneous data in the pipeline. That is, the position of the plot routine in the preprocessing pipeline is important.\"\"\"\n\n def __init__(self, params_pre):\n \"\"\"Instantiates the pre_plot class as a callable. Args: params_pre (dictionary): Preprocessing section of Delta configuration Returns: None\"\"\"\n <|body_0|>\n\n def process(self, data_chunk, executor=None):\n \"\"\"Plots the data chunk. Args: data_chunk (2d image): Data chunk to be wavelet transformed. executor (PEP-3148-style executor): Executor on which to execute. Returns: data_chunk (2d_image): Wavelet-filtered images\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time_range = params_pre['time_range']\n self.plot_dir = params_pre['plot_dir']\n self.logger = logging.getLogger('simple')\n<|end_body_0|>\n\n<|body_start_1|>\n plotter = plot_ecei_timeslice(data_chunk)\n tidx_plot = [data_chunk.tb.time_to_idx(t) for t in self.time_range]\n if tidx_plot[0] is not None:\n self.logger.info(f'Plotting data into {self.plot_dir}. Plotting indices {tidx_plot[0]}, {tidx_plot[-1]}')\n for tidx in range(tidx_plot[0], tidx_plot[1]):\n fig = plotter.create_plot(data_chunk, tidx)\n fig.savefig(join(self.plot_dir, f'chunk_{data_chunk.tb.chunk_idx}_{tidx:04d}.png'))\n return data_chunk\n<|end_body_1|>\n", "revision_id": "7ce63705e18c427f448c8d720c950a54add07966", "skeleton": "<|skeleton|>\nclass pre_plot:\n \"\"\"Plots the pre-processed data and stores it to a file. Plots are made using the instantaneous data in the pipeline. That is, the position of the plot routine in the preprocessing pipeline is important.\"\"\"\n\n def __init__(self, params_pre):\n \"\"\"Instantiates the pre_plot class as a callable. Args: params_pre (dictionary): Preprocessing section of Delta configuration Returns: None\"\"\"\n <|body_0|>\n\n def process(self, data_chunk, executor=None):\n \"\"\"Plots the data chunk. Args: data_chunk (2d image): Data chunk to be wavelet transformed. executor (PEP-3148-style executor): Executor on which to execute. Returns: data_chunk (2d_image): Wavelet-filtered images\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class pre_plot:\n \"\"\"Plots the pre-processed data and stores it to a file. Plots are made using the instantaneous data in the pipeline. That is, the position of the plot routine in the preprocessing pipeline is important.\"\"\"\n\n def __init__(self, params_pre):\n \"\"\"Instantiates the pre_plot class as a callable. Args: params_pre (dictionary): Preprocessing section of Delta configuration Returns: None\"\"\"\n self.time_range = params_pre['time_range']\n self.plot_dir = params_pre['plot_dir']\n self.logger = logging.getLogger('simple')\n\n def process(self, data_chunk, executor=None):\n \"\"\"Plots the data chunk. Args: data_chunk (2d image): Data chunk to be wavelet transformed. executor (PEP-3148-style executor): Executor on which to execute. Returns: data_chunk (2d_image): Wavelet-filtered images\"\"\"\n plotter = plot_ecei_timeslice(data_chunk)\n tidx_plot = [data_chunk.tb.time_to_idx(t) for t in self.time_range]\n if tidx_plot[0] is not None:\n self.logger.info(f'Plotting data into {self.plot_dir}. Plotting indices {tidx_plot[0]}, {tidx_plot[-1]}')\n for tidx in range(tidx_plot[0], tidx_plot[1]):\n fig = plotter.create_plot(data_chunk, tidx)\n fig.savefig(join(self.plot_dir, f'chunk_{data_chunk.tb.chunk_idx}_{tidx:04d}.png'))\n return data_chunk\n", "source": "the_stack_v2_python_sparse", "source_path": "delta/preprocess/pre_plot.py", "source_repo": "rkube/delta", "split": "test", "star_events_count": 7} {"blob_id": "f5d7ff92dd559a2f12d3a54a170be7b5e2d6a82b", "bodies": ["self.o = o\nself.children = []\nself.str_func = str_func", "if self.str_func is not None:\n return self.str_func(self.o)\nelse:\n return str(self.o)"], "bodies_text": "<|body_start_0|>\n self.o = o\n self.children = []\n self.str_func = str_func\n<|end_body_0|>\n\n<|body_start_1|>\n if self.str_func is not None:\n return self.str_func(self.o)\n else:\n return str(self.o)\n<|end_body_1|>\n", "class_docstring": "A node as it is used in the tree structure. Each node contains the object it represents and a list of children. Children can be other nodes or arbitrary other objects. Any object in a tree which is not of the type _Node is considered a leaf.", "class_name": "_Node", "detected_licenses": ["MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass _Node:\n \"\"\"A node as it is used in the tree structure. Each node contains the object it represents and a list of children. Children can be other nodes or arbitrary other objects. Any object in a tree which is not of the type _Node is considered a leaf.\"\"\"\n\n def __init__(self, o, str_func=None):\n \"\"\"You have to define the object this node represents. Also you can define an output function which will be used to represent this node. If no function is defined, the default str representation is used. keyword arguments str_func -- output function\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Override str(self.o) if str_func is defined.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.o = o\n self.children = []\n self.str_func = str_func\n<|end_body_0|>\n\n<|body_start_1|>\n if self.str_func is not None:\n return self.str_func(self.o)\n else:\n return str(self.o)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000352", "length_bytes": 15309, "license_type": "permissive", "methods": [{"docstring": "You have to define the object this node represents. Also you can define an output function which will be used to represent this node. If no function is defined, the default str representation is used. keyword arguments str_func -- output function", "name": "__init__", "signature": "def __init__(self, o, str_func=None)"}, {"docstring": "Override str(self.o) if str_func is defined.", "name": "__str__", "signature": "def __str__(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_047576", "prompt": "Implement the Python class `_Node` described below.\n\nClass description:\nA node as it is used in the tree structure. Each node contains the object it represents and a list of children. Children can be other nodes or arbitrary other objects. Any object in a tree which is not of the type _Node is considered a leaf.\n\nMethod signatures and docstrings:\n- def __init__(self, o, str_func=None): You have to define the object this node represents. Also you can define an output function which will be used to represent this node. If no function is defined, the default str representation is used. keyword arguments str_func -- output function\n- def __str__(self): Override str(self.o) if str_func is defined.", "prompted_full_text": "Implement the Python class `_Node` described below.\n\nClass description:\nA node as it is used in the tree structure. Each node contains the object it represents and a list of children. Children can be other nodes or arbitrary other objects. Any object in a tree which is not of the type _Node is considered a leaf.\n\nMethod signatures and docstrings:\n- def __init__(self, o, str_func=None): You have to define the object this node represents. Also you can define an output function which will be used to represent this node. If no function is defined, the default str representation is used. keyword arguments str_func -- output function\n- def __str__(self): Override str(self.o) if str_func is defined.\n\n<|skeleton|>\nclass _Node:\n \"\"\"A node as it is used in the tree structure. Each node contains the object it represents and a list of children. Children can be other nodes or arbitrary other objects. Any object in a tree which is not of the type _Node is considered a leaf.\"\"\"\n\n def __init__(self, o, str_func=None):\n \"\"\"You have to define the object this node represents. Also you can define an output function which will be used to represent this node. If no function is defined, the default str representation is used. keyword arguments str_func -- output function\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Override str(self.o) if str_func is defined.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.o = o\n self.children = []\n self.str_func = str_func\n<|end_body_0|>\n\n<|body_start_1|>\n if self.str_func is not None:\n return self.str_func(self.o)\n else:\n return str(self.o)\n<|end_body_1|>\n", "revision_id": "9a31d44415a27dd328740c64ec67e0fd2265fd5d", "skeleton": "<|skeleton|>\nclass _Node:\n \"\"\"A node as it is used in the tree structure. Each node contains the object it represents and a list of children. Children can be other nodes or arbitrary other objects. Any object in a tree which is not of the type _Node is considered a leaf.\"\"\"\n\n def __init__(self, o, str_func=None):\n \"\"\"You have to define the object this node represents. Also you can define an output function which will be used to represent this node. If no function is defined, the default str representation is used. keyword arguments str_func -- output function\"\"\"\n <|body_0|>\n\n def __str__(self):\n \"\"\"Override str(self.o) if str_func is defined.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class _Node:\n \"\"\"A node as it is used in the tree structure. Each node contains the object it represents and a list of children. Children can be other nodes or arbitrary other objects. Any object in a tree which is not of the type _Node is considered a leaf.\"\"\"\n\n def __init__(self, o, str_func=None):\n \"\"\"You have to define the object this node represents. Also you can define an output function which will be used to represent this node. If no function is defined, the default str representation is used. keyword arguments str_func -- output function\"\"\"\n self.o = o\n self.children = []\n self.str_func = str_func\n\n def __str__(self):\n \"\"\"Override str(self.o) if str_func is defined.\"\"\"\n if self.str_func is not None:\n return self.str_func(self.o)\n else:\n return str(self.o)\n", "source": "the_stack_v2_python_sparse", "source_path": "pympler/refbrowser.py", "source_repo": "pympler/pympler", "split": "test", "star_events_count": 1050} {"blob_id": "0dede8582813858998aafa1921f7aa86ed0d54e4", "bodies": ["invalid = u'! # $ % ^ & * ( ) = + , : ; \" | ~ / \\\\ \\x00 \\u202a'.split()\nbase = u'User%sName'\nfor c in invalid:\n name = base % c\n assert not user.isValidName(self.request, name)", "cases = (u' User Name', u'User Name ', u'User Name')\nfor test in cases:\n assert not user.isValidName(self.request, test)", "cases = (u'Jürgen Hermann', u'ניר סופר', u'CamelCase', u'가각간갇갈 갉갊감 갬갯걀갼')\nfor test in cases:\n assert user.isValidName(self.request, test)"], "bodies_text": "<|body_start_0|>\n invalid = u'! # $ % ^ & * ( ) = + , : ; \" | ~ / \\\\ \\x00 \\u202a'.split()\n base = u'User%sName'\n for c in invalid:\n name = base % c\n assert not user.isValidName(self.request, name)\n<|end_body_0|>\n\n<|body_start_1|>\n cases = (u' User Name', u'User Name ', u'User Name')\n for test in cases:\n assert not user.isValidName(self.request, test)\n<|end_body_1|>\n\n<|body_start_2|>\n cases = (u'Jürgen Hermann', u'ניר סופר', u'CamelCase', u'가각간갇갈 갉갊감 갬갯걀갼')\n for test in cases:\n assert user.isValidName(self.request, test)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TestIsValidName", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestIsValidName:\n\n def testNonAlnumCharacters(self):\n \"\"\"user: isValidName: reject unicode non alpha numeric characters : and , used in acl rules, we might add more characters to the syntax.\"\"\"\n <|body_0|>\n\n def testWhitespace(self):\n \"\"\"user: isValidName: reject leading, trailing or multiple whitespace\"\"\"\n <|body_1|>\n\n def testValid(self):\n \"\"\"user: isValidName: accept names in any language, with spaces\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n invalid = u'! # $ % ^ & * ( ) = + , : ; \" | ~ / \\\\ \\x00 \\u202a'.split()\n base = u'User%sName'\n for c in invalid:\n name = base % c\n assert not user.isValidName(self.request, name)\n<|end_body_0|>\n\n<|body_start_1|>\n cases = (u' User Name', u'User Name ', u'User Name')\n for test in cases:\n assert not user.isValidName(self.request, test)\n<|end_body_1|>\n\n<|body_start_2|>\n cases = (u'Jürgen Hermann', u'ניר סופר', u'CamelCase', u'가각간갇갈 갉갊감 갬갯걀갼')\n for test in cases:\n assert user.isValidName(self.request, test)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000353", "length_bytes": 11347, "license_type": "no_license", "methods": [{"docstring": "user: isValidName: reject unicode non alpha numeric characters : and , used in acl rules, we might add more characters to the syntax.", "name": "testNonAlnumCharacters", "signature": "def testNonAlnumCharacters(self)"}, {"docstring": "user: isValidName: reject leading, trailing or multiple whitespace", "name": "testWhitespace", "signature": "def testWhitespace(self)"}, {"docstring": "user: isValidName: accept names in any language, with spaces", "name": "testValid", "signature": "def testValid(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_014662", "prompt": "Implement the Python class `TestIsValidName` described below.\n\nClass description:\nImplement the TestIsValidName class.\n\nMethod signatures and docstrings:\n- def testNonAlnumCharacters(self): user: isValidName: reject unicode non alpha numeric characters : and , used in acl rules, we might add more characters to the syntax.\n- def testWhitespace(self): user: isValidName: reject leading, trailing or multiple whitespace\n- def testValid(self): user: isValidName: accept names in any language, with spaces", "prompted_full_text": "Implement the Python class `TestIsValidName` described below.\n\nClass description:\nImplement the TestIsValidName class.\n\nMethod signatures and docstrings:\n- def testNonAlnumCharacters(self): user: isValidName: reject unicode non alpha numeric characters : and , used in acl rules, we might add more characters to the syntax.\n- def testWhitespace(self): user: isValidName: reject leading, trailing or multiple whitespace\n- def testValid(self): user: isValidName: accept names in any language, with spaces\n\n<|skeleton|>\nclass TestIsValidName:\n\n def testNonAlnumCharacters(self):\n \"\"\"user: isValidName: reject unicode non alpha numeric characters : and , used in acl rules, we might add more characters to the syntax.\"\"\"\n <|body_0|>\n\n def testWhitespace(self):\n \"\"\"user: isValidName: reject leading, trailing or multiple whitespace\"\"\"\n <|body_1|>\n\n def testValid(self):\n \"\"\"user: isValidName: accept names in any language, with spaces\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n invalid = u'! # $ % ^ & * ( ) = + , : ; \" | ~ / \\\\ \\x00 \\u202a'.split()\n base = u'User%sName'\n for c in invalid:\n name = base % c\n assert not user.isValidName(self.request, name)\n<|end_body_0|>\n\n<|body_start_1|>\n cases = (u' User Name', u'User Name ', u'User Name')\n for test in cases:\n assert not user.isValidName(self.request, test)\n<|end_body_1|>\n\n<|body_start_2|>\n cases = (u'Jürgen Hermann', u'ניר סופר', u'CamelCase', u'가각간갇갈 갉갊감 갬갯걀갼')\n for test in cases:\n assert user.isValidName(self.request, test)\n<|end_body_2|>\n", "revision_id": "d6e801402c4538bdfb34a97cf07153101167c1ec", "skeleton": "<|skeleton|>\nclass TestIsValidName:\n\n def testNonAlnumCharacters(self):\n \"\"\"user: isValidName: reject unicode non alpha numeric characters : and , used in acl rules, we might add more characters to the syntax.\"\"\"\n <|body_0|>\n\n def testWhitespace(self):\n \"\"\"user: isValidName: reject leading, trailing or multiple whitespace\"\"\"\n <|body_1|>\n\n def testValid(self):\n \"\"\"user: isValidName: accept names in any language, with spaces\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestIsValidName:\n def testNonAlnumCharacters(self):\n \"\"\"user: isValidName: reject unicode non alpha numeric characters : and , used in acl rules, we might add more characters to the syntax.\"\"\"\n invalid = u'! # $ % ^ & * ( ) = + , : ; \" | ~ / \\\\ \\x00 \\u202a'.split()\n base = u'User%sName'\n for c in invalid:\n name = base % c\n assert not user.isValidName(self.request, name)\n\n def testWhitespace(self):\n \"\"\"user: isValidName: reject leading, trailing or multiple whitespace\"\"\"\n cases = (u' User Name', u'User Name ', u'User Name')\n for test in cases:\n assert not user.isValidName(self.request, test)\n\n def testValid(self):\n \"\"\"user: isValidName: accept names in any language, with spaces\"\"\"\n cases = (u'Jürgen Hermann', u'ניר סופר', u'CamelCase', u'가각간갇갈 갉갊감 갬갯걀갼')\n for test in cases:\n assert user.isValidName(self.request, test)\n", "source": "the_stack_v2_python_sparse", "source_path": "MoinMoin/_tests/test_user.py", "source_repo": "happytk/jardin", "split": "test", "star_events_count": 0} {"blob_id": "1d6a162314ca85a08809ba4951bc086306f2a435", "bodies": ["self.max_dist = max_dist\nself.grid = imread(path, 0).astype('float32') / 255.0\nself.height = self.grid.shape[0]\nself.width = self.grid.shape[1]", "omap_array = ((1.0 - self.grid.transpose()) * 255.0).astype('int')\nomap_array = np.tile(np.expand_dims(omap_array, axis=-1), [1, 1, 3])\npygame.surfarray.blit_array(surface, omap_array)", "for i in range(self.max_dist):\n pt_sonar = vec(i, 0)\n pt_map = mul(T_sonar_map, pt_sonar)\n r = int(pt_map[1])\n c = int(pt_map[0])\n if r < 0 or r >= self.grid.shape[0]:\n continue\n if c < 0 or c >= self.grid.shape[1]:\n continue\n if self.grid[r, c] > 0:\n return i + np.random.normal(0, 2, 1)[0]\nreturn 0.0"], "bodies_text": "<|body_start_0|>\n self.max_dist = max_dist\n self.grid = imread(path, 0).astype('float32') / 255.0\n self.height = self.grid.shape[0]\n self.width = self.grid.shape[1]\n<|end_body_0|>\n\n<|body_start_1|>\n omap_array = ((1.0 - self.grid.transpose()) * 255.0).astype('int')\n omap_array = np.tile(np.expand_dims(omap_array, axis=-1), [1, 1, 3])\n pygame.surfarray.blit_array(surface, omap_array)\n<|end_body_1|>\n\n<|body_start_2|>\n for i in range(self.max_dist):\n pt_sonar = vec(i, 0)\n pt_map = mul(T_sonar_map, pt_sonar)\n r = int(pt_map[1])\n c = int(pt_map[0])\n if r < 0 or r >= self.grid.shape[0]:\n continue\n if c < 0 or c >= self.grid.shape[1]:\n continue\n if self.grid[r, c] > 0:\n return i + np.random.normal(0, 2, 1)[0]\n return 0.0\n<|end_body_2|>\n", "class_docstring": "Maintains an obstacle map. The map contains binary values: 0 (free) or 1 (occupied). The map is stored as a matrix with shape (height,width). The map can be used to simulate rangefinder readings.", "class_name": "ObstacleMap", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ObstacleMap:\n \"\"\"Maintains an obstacle map. The map contains binary values: 0 (free) or 1 (occupied). The map is stored as a matrix with shape (height,width). The map can be used to simulate rangefinder readings.\"\"\"\n\n def __init__(self, path, max_dist=80):\n \"\"\"Creates an obstacle map. Arguments: path: path to a grayscale image representing the map max_dist: maximum rangefinder reading (cm)\"\"\"\n <|body_0|>\n\n def draw(self, surface):\n \"\"\"Draws the obstacle map onto the surface.\"\"\"\n <|body_1|>\n\n def get_first_hit(self, T_sonar_map):\n \"\"\"Calculates distance that sonar would report given current pose. Arguments: T_sonar_map: sonar-to-map transformation matrix Returns: First-hit distance or zero if no hit.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.max_dist = max_dist\n self.grid = imread(path, 0).astype('float32') / 255.0\n self.height = self.grid.shape[0]\n self.width = self.grid.shape[1]\n<|end_body_0|>\n\n<|body_start_1|>\n omap_array = ((1.0 - self.grid.transpose()) * 255.0).astype('int')\n omap_array = np.tile(np.expand_dims(omap_array, axis=-1), [1, 1, 3])\n pygame.surfarray.blit_array(surface, omap_array)\n<|end_body_1|>\n\n<|body_start_2|>\n for i in range(self.max_dist):\n pt_sonar = vec(i, 0)\n pt_map = mul(T_sonar_map, pt_sonar)\n r = int(pt_map[1])\n c = int(pt_map[0])\n if r < 0 or r >= self.grid.shape[0]:\n continue\n if c < 0 or c >= self.grid.shape[1]:\n continue\n if self.grid[r, c] > 0:\n return i + np.random.normal(0, 2, 1)[0]\n return 0.0\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000354", "length_bytes": 2765, "license_type": "no_license", "methods": [{"docstring": "Creates an obstacle map. Arguments: path: path to a grayscale image representing the map max_dist: maximum rangefinder reading (cm)", "name": "__init__", "signature": "def __init__(self, path, max_dist=80)"}, {"docstring": "Draws the obstacle map onto the surface.", "name": "draw", "signature": "def draw(self, surface)"}, {"docstring": "Calculates distance that sonar would report given current pose. Arguments: T_sonar_map: sonar-to-map transformation matrix Returns: First-hit distance or zero if no hit.", "name": "get_first_hit", "signature": "def get_first_hit(self, T_sonar_map)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_002552", "prompt": "Implement the Python class `ObstacleMap` described below.\n\nClass description:\nMaintains an obstacle map. The map contains binary values: 0 (free) or 1 (occupied). The map is stored as a matrix with shape (height,width). The map can be used to simulate rangefinder readings.\n\nMethod signatures and docstrings:\n- def __init__(self, path, max_dist=80): Creates an obstacle map. Arguments: path: path to a grayscale image representing the map max_dist: maximum rangefinder reading (cm)\n- def draw(self, surface): Draws the obstacle map onto the surface.\n- def get_first_hit(self, T_sonar_map): Calculates distance that sonar would report given current pose. Arguments: T_sonar_map: sonar-to-map transformation matrix Returns: First-hit distance or zero if no hit.", "prompted_full_text": "Implement the Python class `ObstacleMap` described below.\n\nClass description:\nMaintains an obstacle map. The map contains binary values: 0 (free) or 1 (occupied). The map is stored as a matrix with shape (height,width). The map can be used to simulate rangefinder readings.\n\nMethod signatures and docstrings:\n- def __init__(self, path, max_dist=80): Creates an obstacle map. Arguments: path: path to a grayscale image representing the map max_dist: maximum rangefinder reading (cm)\n- def draw(self, surface): Draws the obstacle map onto the surface.\n- def get_first_hit(self, T_sonar_map): Calculates distance that sonar would report given current pose. Arguments: T_sonar_map: sonar-to-map transformation matrix Returns: First-hit distance or zero if no hit.\n\n<|skeleton|>\nclass ObstacleMap:\n \"\"\"Maintains an obstacle map. The map contains binary values: 0 (free) or 1 (occupied). The map is stored as a matrix with shape (height,width). The map can be used to simulate rangefinder readings.\"\"\"\n\n def __init__(self, path, max_dist=80):\n \"\"\"Creates an obstacle map. Arguments: path: path to a grayscale image representing the map max_dist: maximum rangefinder reading (cm)\"\"\"\n <|body_0|>\n\n def draw(self, surface):\n \"\"\"Draws the obstacle map onto the surface.\"\"\"\n <|body_1|>\n\n def get_first_hit(self, T_sonar_map):\n \"\"\"Calculates distance that sonar would report given current pose. Arguments: T_sonar_map: sonar-to-map transformation matrix Returns: First-hit distance or zero if no hit.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.max_dist = max_dist\n self.grid = imread(path, 0).astype('float32') / 255.0\n self.height = self.grid.shape[0]\n self.width = self.grid.shape[1]\n<|end_body_0|>\n\n<|body_start_1|>\n omap_array = ((1.0 - self.grid.transpose()) * 255.0).astype('int')\n omap_array = np.tile(np.expand_dims(omap_array, axis=-1), [1, 1, 3])\n pygame.surfarray.blit_array(surface, omap_array)\n<|end_body_1|>\n\n<|body_start_2|>\n for i in range(self.max_dist):\n pt_sonar = vec(i, 0)\n pt_map = mul(T_sonar_map, pt_sonar)\n r = int(pt_map[1])\n c = int(pt_map[0])\n if r < 0 or r >= self.grid.shape[0]:\n continue\n if c < 0 or c >= self.grid.shape[1]:\n continue\n if self.grid[r, c] > 0:\n return i + np.random.normal(0, 2, 1)[0]\n return 0.0\n<|end_body_2|>\n", "revision_id": "b0473f9dff27af7b601a4553f609fbb68e8adbf1", "skeleton": "<|skeleton|>\nclass ObstacleMap:\n \"\"\"Maintains an obstacle map. The map contains binary values: 0 (free) or 1 (occupied). The map is stored as a matrix with shape (height,width). The map can be used to simulate rangefinder readings.\"\"\"\n\n def __init__(self, path, max_dist=80):\n \"\"\"Creates an obstacle map. Arguments: path: path to a grayscale image representing the map max_dist: maximum rangefinder reading (cm)\"\"\"\n <|body_0|>\n\n def draw(self, surface):\n \"\"\"Draws the obstacle map onto the surface.\"\"\"\n <|body_1|>\n\n def get_first_hit(self, T_sonar_map):\n \"\"\"Calculates distance that sonar would report given current pose. Arguments: T_sonar_map: sonar-to-map transformation matrix Returns: First-hit distance or zero if no hit.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ObstacleMap:\n \"\"\"Maintains an obstacle map. The map contains binary values: 0 (free) or 1 (occupied). The map is stored as a matrix with shape (height,width). The map can be used to simulate rangefinder readings.\"\"\"\n\n def __init__(self, path, max_dist=80):\n \"\"\"Creates an obstacle map. Arguments: path: path to a grayscale image representing the map max_dist: maximum rangefinder reading (cm)\"\"\"\n self.max_dist = max_dist\n self.grid = imread(path, 0).astype('float32') / 255.0\n self.height = self.grid.shape[0]\n self.width = self.grid.shape[1]\n\n def draw(self, surface):\n \"\"\"Draws the obstacle map onto the surface.\"\"\"\n omap_array = ((1.0 - self.grid.transpose()) * 255.0).astype('int')\n omap_array = np.tile(np.expand_dims(omap_array, axis=-1), [1, 1, 3])\n pygame.surfarray.blit_array(surface, omap_array)\n\n def get_first_hit(self, T_sonar_map):\n \"\"\"Calculates distance that sonar would report given current pose. Arguments: T_sonar_map: sonar-to-map transformation matrix Returns: First-hit distance or zero if no hit.\"\"\"\n for i in range(self.max_dist):\n pt_sonar = vec(i, 0)\n pt_map = mul(T_sonar_map, pt_sonar)\n r = int(pt_map[1])\n c = int(pt_map[0])\n if r < 0 or r >= self.grid.shape[0]:\n continue\n if c < 0 or c >= self.grid.shape[1]:\n continue\n if self.grid[r, c] > 0:\n return i + np.random.normal(0, 2, 1)[0]\n return 0.0\n", "source": "the_stack_v2_python_sparse", "source_path": "HW3/ObstacleMap.py", "source_repo": "ericvelazquez/Robotics", "split": "test", "star_events_count": 1} {"blob_id": "4e06365419c9f21621a37c95bb312a48b13af3bc", "bodies": ["def dfs(left, right):\n if left is None and right is None:\n return True\n if left is None or right is None:\n return False\n if left.val != right.val:\n return False\n return dfs(left.left, right.right) and dfs(left.right, right.left)\nreturn dfs(root.left, root.right)", "if not root or not (root.left or root.right):\n return True\nqueue = [root.left, root.right]\nwhile queue:\n left = queue.pop(0)\n right = queue.pop(0)\n if not left and (not right):\n continue\n if not left or not right:\n return False\n if left.val != right.val:\n return False\n queue.append(left.left)\n queue.append(right.right)\n queue.append(left.right)\n queue.append(right.left)\nreturn True"], "bodies_text": "<|body_start_0|>\n def dfs(left, right):\n if left is None and right is None:\n return True\n if left is None or right is None:\n return False\n if left.val != right.val:\n return False\n return dfs(left.left, right.right) and dfs(left.right, right.left)\n return dfs(root.left, root.right)\n<|end_body_0|>\n\n<|body_start_1|>\n if not root or not (root.left or root.right):\n return True\n queue = [root.left, root.right]\n while queue:\n left = queue.pop(0)\n right = queue.pop(0)\n if not left and (not right):\n continue\n if not left or not right:\n return False\n if left.val != right.val:\n return False\n queue.append(left.left)\n queue.append(right.right)\n queue.append(left.right)\n queue.append(right.left)\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isSymmetric(self, root):\n \"\"\"采用递归 :type root: TreeNode :rtype: bool\"\"\"\n <|body_0|>\n\n def isSymmetric(self, root):\n \"\"\"采用迭代,改用队列实现 首先从队列中拿出两个节点(left和right)比较 将left的left节点和right的right节点放入队列 将left的right节点和right的left节点放入队列 :param root: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(left, right):\n if left is None and right is None:\n return True\n if left is None or right is None:\n return False\n if left.val != right.val:\n return False\n return dfs(left.left, right.right) and dfs(left.right, right.left)\n return dfs(root.left, root.right)\n<|end_body_0|>\n\n<|body_start_1|>\n if not root or not (root.left or root.right):\n return True\n queue = [root.left, root.right]\n while queue:\n left = queue.pop(0)\n right = queue.pop(0)\n if not left and (not right):\n continue\n if not left or not right:\n return False\n if left.val != right.val:\n return False\n queue.append(left.left)\n queue.append(right.right)\n queue.append(left.right)\n queue.append(right.left)\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000355", "length_bytes": 1801, "license_type": "no_license", "methods": [{"docstring": "采用递归 :type root: TreeNode :rtype: bool", "name": "isSymmetric", "signature": "def isSymmetric(self, root)"}, {"docstring": "采用迭代,改用队列实现 首先从队列中拿出两个节点(left和right)比较 将left的left节点和right的right节点放入队列 将left的right节点和right的left节点放入队列 :param root: :return:", "name": "isSymmetric", "signature": "def isSymmetric(self, root)"}], "n_methods": 2, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isSymmetric(self, root): 采用递归 :type root: TreeNode :rtype: bool\n- def isSymmetric(self, root): 采用迭代,改用队列实现 首先从队列中拿出两个节点(left和right)比较 将left的left节点和right的right节点放入队列 将left的right节点和right的left节点放入队列 :param root: :return:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isSymmetric(self, root): 采用递归 :type root: TreeNode :rtype: bool\n- def isSymmetric(self, root): 采用迭代,改用队列实现 首先从队列中拿出两个节点(left和right)比较 将left的left节点和right的right节点放入队列 将left的right节点和right的left节点放入队列 :param root: :return:\n\n<|skeleton|>\nclass Solution:\n\n def isSymmetric(self, root):\n \"\"\"采用递归 :type root: TreeNode :rtype: bool\"\"\"\n <|body_0|>\n\n def isSymmetric(self, root):\n \"\"\"采用迭代,改用队列实现 首先从队列中拿出两个节点(left和right)比较 将left的left节点和right的right节点放入队列 将left的right节点和right的left节点放入队列 :param root: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(left, right):\n if left is None and right is None:\n return True\n if left is None or right is None:\n return False\n if left.val != right.val:\n return False\n return dfs(left.left, right.right) and dfs(left.right, right.left)\n return dfs(root.left, root.right)\n<|end_body_0|>\n\n<|body_start_1|>\n if not root or not (root.left or root.right):\n return True\n queue = [root.left, root.right]\n while queue:\n left = queue.pop(0)\n right = queue.pop(0)\n if not left and (not right):\n continue\n if not left or not right:\n return False\n if left.val != right.val:\n return False\n queue.append(left.left)\n queue.append(right.right)\n queue.append(left.right)\n queue.append(right.left)\n return True\n<|end_body_1|>\n", "revision_id": "1040b5dbbe509abe42df848bc34dd1626d7a05fb", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isSymmetric(self, root):\n \"\"\"采用递归 :type root: TreeNode :rtype: bool\"\"\"\n <|body_0|>\n\n def isSymmetric(self, root):\n \"\"\"采用迭代,改用队列实现 首先从队列中拿出两个节点(left和right)比较 将left的left节点和right的right节点放入队列 将left的right节点和right的left节点放入队列 :param root: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isSymmetric(self, root):\n \"\"\"采用递归 :type root: TreeNode :rtype: bool\"\"\"\n def dfs(left, right):\n if left is None and right is None:\n return True\n if left is None or right is None:\n return False\n if left.val != right.val:\n return False\n return dfs(left.left, right.right) and dfs(left.right, right.left)\n return dfs(root.left, root.right)\n\n def isSymmetric(self, root):\n \"\"\"采用迭代,改用队列实现 首先从队列中拿出两个节点(left和right)比较 将left的left节点和right的right节点放入队列 将left的right节点和right的left节点放入队列 :param root: :return:\"\"\"\n if not root or not (root.left or root.right):\n return True\n queue = [root.left, root.right]\n while queue:\n left = queue.pop(0)\n right = queue.pop(0)\n if not left and (not right):\n continue\n if not left or not right:\n return False\n if left.val != right.val:\n return False\n queue.append(left.left)\n queue.append(right.right)\n queue.append(left.right)\n queue.append(right.left)\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "tree/isSymmetric_101.py", "source_repo": "NJ-zero/LeetCode_Answer", "split": "test", "star_events_count": 1} {"blob_id": "8d1583271792b6666e0730ebebc7a109c9a50092", "bodies": ["try:\n cancel_btn = self.find_element(*self.cancel_loc)\nexcept NoSuchElementException:\n logging.info('=====cancel button does not exist in page =====')\nelse:\n cancel_btn.click()\n logging.info('=====Click Cancel btn====')", "try:\n skip_btn = self.find_element(*self.skip_loc)\nexcept NoSuchElementException:\n logging.info('=====skip button does not exist in page =====')\nelse:\n skip_btn.click()\n logging.info('=====Click skip btn====')"], "bodies_text": "<|body_start_0|>\n try:\n cancel_btn = self.find_element(*self.cancel_loc)\n except NoSuchElementException:\n logging.info('=====cancel button does not exist in page =====')\n else:\n cancel_btn.click()\n logging.info('=====Click Cancel btn====')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n skip_btn = self.find_element(*self.skip_loc)\n except NoSuchElementException:\n logging.info('=====skip button does not exist in page =====')\n else:\n skip_btn.click()\n logging.info('=====Click skip btn====')\n<|end_body_1|>\n", "class_docstring": "cancel_btn and skip btn", "class_name": "Common", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Common:\n \"\"\"cancel_btn and skip btn\"\"\"\n\n def check_cancelBtn(self):\n \"\"\"if there is a update button cancel it then :return:\"\"\"\n <|body_0|>\n\n def check_skipBtn(self):\n \"\"\"to skip advertising :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n cancel_btn = self.find_element(*self.cancel_loc)\n except NoSuchElementException:\n logging.info('=====cancel button does not exist in page =====')\n else:\n cancel_btn.click()\n logging.info('=====Click Cancel btn====')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n skip_btn = self.find_element(*self.skip_loc)\n except NoSuchElementException:\n logging.info('=====skip button does not exist in page =====')\n else:\n skip_btn.click()\n logging.info('=====Click skip btn====')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000356", "length_bytes": 1734, "license_type": "no_license", "methods": [{"docstring": "if there is a update button cancel it then :return:", "name": "check_cancelBtn", "signature": "def check_cancelBtn(self)"}, {"docstring": "to skip advertising :return:", "name": "check_skipBtn", "signature": "def check_skipBtn(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_042487", "prompt": "Implement the Python class `Common` described below.\n\nClass description:\ncancel_btn and skip btn\n\nMethod signatures and docstrings:\n- def check_cancelBtn(self): if there is a update button cancel it then :return:\n- def check_skipBtn(self): to skip advertising :return:", "prompted_full_text": "Implement the Python class `Common` described below.\n\nClass description:\ncancel_btn and skip btn\n\nMethod signatures and docstrings:\n- def check_cancelBtn(self): if there is a update button cancel it then :return:\n- def check_skipBtn(self): to skip advertising :return:\n\n<|skeleton|>\nclass Common:\n \"\"\"cancel_btn and skip btn\"\"\"\n\n def check_cancelBtn(self):\n \"\"\"if there is a update button cancel it then :return:\"\"\"\n <|body_0|>\n\n def check_skipBtn(self):\n \"\"\"to skip advertising :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n cancel_btn = self.find_element(*self.cancel_loc)\n except NoSuchElementException:\n logging.info('=====cancel button does not exist in page =====')\n else:\n cancel_btn.click()\n logging.info('=====Click Cancel btn====')\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n skip_btn = self.find_element(*self.skip_loc)\n except NoSuchElementException:\n logging.info('=====skip button does not exist in page =====')\n else:\n skip_btn.click()\n logging.info('=====Click skip btn====')\n<|end_body_1|>\n", "revision_id": "2e686b4e591d9cc0e9ab7f8c401cb4429b5a1d98", "skeleton": "<|skeleton|>\nclass Common:\n \"\"\"cancel_btn and skip btn\"\"\"\n\n def check_cancelBtn(self):\n \"\"\"if there is a update button cancel it then :return:\"\"\"\n <|body_0|>\n\n def check_skipBtn(self):\n \"\"\"to skip advertising :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Common:\n \"\"\"cancel_btn and skip btn\"\"\"\n\n def check_cancelBtn(self):\n \"\"\"if there is a update button cancel it then :return:\"\"\"\n try:\n cancel_btn = self.find_element(*self.cancel_loc)\n except NoSuchElementException:\n logging.info('=====cancel button does not exist in page =====')\n else:\n cancel_btn.click()\n logging.info('=====Click Cancel btn====')\n\n def check_skipBtn(self):\n \"\"\"to skip advertising :return:\"\"\"\n try:\n skip_btn = self.find_element(*self.skip_loc)\n except NoSuchElementException:\n logging.info('=====skip button does not exist in page =====')\n else:\n skip_btn.click()\n logging.info('=====Click skip btn====')\n", "source": "the_stack_v2_python_sparse", "source_path": "appium/common_fun.py", "source_repo": "Onebigbera/appium_practice", "split": "test", "star_events_count": 1} {"blob_id": "8ff67c32c27c3ce8e9e2d623ec1b626734b61c7f", "bodies": ["super().__init__(fmt='%(message)s', datefmt='%Y-%m-%d %H:%M:%S', style='%')\nself.append_br = append_br\nself.replace_nl_with_br = replace_nl_with_br", "super().format(record)\nrecord.asctime = self.formatTime(record, self.datefmt)\nbg_col = self.log_background_colors[record.levelno]\nmsg = escape(record.getMessage())\nif self.replace_nl_with_br:\n msg = msg.replace('\\n', '
')\nhtml = '{time}.{ms:03d} {name}:{lvname}: {msg}{br}'.format(time=record.asctime, ms=int(record.msecs), name=record.name, lvname=record.levelname, color=self.log_colors[record.levelno], msg=msg, bg=f';background-color:{bg_col}' if bg_col else '', br='
' if self.append_br else '')\nreturn html"], "bodies_text": "<|body_start_0|>\n super().__init__(fmt='%(message)s', datefmt='%Y-%m-%d %H:%M:%S', style='%')\n self.append_br = append_br\n self.replace_nl_with_br = replace_nl_with_br\n<|end_body_0|>\n\n<|body_start_1|>\n super().format(record)\n record.asctime = self.formatTime(record, self.datefmt)\n bg_col = self.log_background_colors[record.levelno]\n msg = escape(record.getMessage())\n if self.replace_nl_with_br:\n msg = msg.replace('\\n', '
')\n html = '{time}.{ms:03d} {name}:{lvname}: {msg}{br}'.format(time=record.asctime, ms=int(record.msecs), name=record.name, lvname=record.levelname, color=self.log_colors[record.levelno], msg=msg, bg=f';background-color:{bg_col}' if bg_col else '', br='
' if self.append_br else '')\n return html\n<|end_body_1|>\n", "class_docstring": "Class to format Python logs in coloured HTML.", "class_name": "HtmlColorFormatter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HtmlColorFormatter:\n \"\"\"Class to format Python logs in coloured HTML.\"\"\"\n\n def __init__(self, append_br: bool=False, replace_nl_with_br: bool=True) -> None:\n \"\"\"Args: append_br: append ``
`` to each line? replace_nl_with_br: replace ``\\\\n`` with ``
`` in messages? See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py\"\"\"\n <|body_0|>\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\"Internal function to format the :class:`LogRecord` as HTML. See https://docs.python.org/3.4/library/logging.html#logging.LogRecord\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(fmt='%(message)s', datefmt='%Y-%m-%d %H:%M:%S', style='%')\n self.append_br = append_br\n self.replace_nl_with_br = replace_nl_with_br\n<|end_body_0|>\n\n<|body_start_1|>\n super().format(record)\n record.asctime = self.formatTime(record, self.datefmt)\n bg_col = self.log_background_colors[record.levelno]\n msg = escape(record.getMessage())\n if self.replace_nl_with_br:\n msg = msg.replace('\\n', '
')\n html = '{time}.{ms:03d} {name}:{lvname}: {msg}{br}'.format(time=record.asctime, ms=int(record.msecs), name=record.name, lvname=record.levelname, color=self.log_colors[record.levelno], msg=msg, bg=f';background-color:{bg_col}' if bg_col else '', br='
' if self.append_br else '')\n return html\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000357", "length_bytes": 26012, "license_type": "permissive", "methods": [{"docstring": "Args: append_br: append ``
`` to each line? replace_nl_with_br: replace ``\\\\n`` with ``
`` in messages? See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py", "name": "__init__", "signature": "def __init__(self, append_br: bool=False, replace_nl_with_br: bool=True) -> None"}, {"docstring": "Internal function to format the :class:`LogRecord` as HTML. See https://docs.python.org/3.4/library/logging.html#logging.LogRecord", "name": "format", "signature": "def format(self, record: logging.LogRecord) -> str"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000074", "prompt": "Implement the Python class `HtmlColorFormatter` described below.\n\nClass description:\nClass to format Python logs in coloured HTML.\n\nMethod signatures and docstrings:\n- def __init__(self, append_br: bool=False, replace_nl_with_br: bool=True) -> None: Args: append_br: append ``
`` to each line? replace_nl_with_br: replace ``\\\\n`` with ``
`` in messages? See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py\n- def format(self, record: logging.LogRecord) -> str: Internal function to format the :class:`LogRecord` as HTML. See https://docs.python.org/3.4/library/logging.html#logging.LogRecord", "prompted_full_text": "Implement the Python class `HtmlColorFormatter` described below.\n\nClass description:\nClass to format Python logs in coloured HTML.\n\nMethod signatures and docstrings:\n- def __init__(self, append_br: bool=False, replace_nl_with_br: bool=True) -> None: Args: append_br: append ``
`` to each line? replace_nl_with_br: replace ``\\\\n`` with ``
`` in messages? See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py\n- def format(self, record: logging.LogRecord) -> str: Internal function to format the :class:`LogRecord` as HTML. See https://docs.python.org/3.4/library/logging.html#logging.LogRecord\n\n<|skeleton|>\nclass HtmlColorFormatter:\n \"\"\"Class to format Python logs in coloured HTML.\"\"\"\n\n def __init__(self, append_br: bool=False, replace_nl_with_br: bool=True) -> None:\n \"\"\"Args: append_br: append ``
`` to each line? replace_nl_with_br: replace ``\\\\n`` with ``
`` in messages? See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py\"\"\"\n <|body_0|>\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\"Internal function to format the :class:`LogRecord` as HTML. See https://docs.python.org/3.4/library/logging.html#logging.LogRecord\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(fmt='%(message)s', datefmt='%Y-%m-%d %H:%M:%S', style='%')\n self.append_br = append_br\n self.replace_nl_with_br = replace_nl_with_br\n<|end_body_0|>\n\n<|body_start_1|>\n super().format(record)\n record.asctime = self.formatTime(record, self.datefmt)\n bg_col = self.log_background_colors[record.levelno]\n msg = escape(record.getMessage())\n if self.replace_nl_with_br:\n msg = msg.replace('\\n', '
')\n html = '{time}.{ms:03d} {name}:{lvname}: {msg}{br}'.format(time=record.asctime, ms=int(record.msecs), name=record.name, lvname=record.levelname, color=self.log_colors[record.levelno], msg=msg, bg=f';background-color:{bg_col}' if bg_col else '', br='
' if self.append_br else '')\n return html\n<|end_body_1|>\n", "revision_id": "86ec00e039a85b90609c8fe4b221d183912eaec4", "skeleton": "<|skeleton|>\nclass HtmlColorFormatter:\n \"\"\"Class to format Python logs in coloured HTML.\"\"\"\n\n def __init__(self, append_br: bool=False, replace_nl_with_br: bool=True) -> None:\n \"\"\"Args: append_br: append ``
`` to each line? replace_nl_with_br: replace ``\\\\n`` with ``
`` in messages? See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py\"\"\"\n <|body_0|>\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\"Internal function to format the :class:`LogRecord` as HTML. See https://docs.python.org/3.4/library/logging.html#logging.LogRecord\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HtmlColorFormatter:\n \"\"\"Class to format Python logs in coloured HTML.\"\"\"\n\n def __init__(self, append_br: bool=False, replace_nl_with_br: bool=True) -> None:\n \"\"\"Args: append_br: append ``
`` to each line? replace_nl_with_br: replace ``\\\\n`` with ``
`` in messages? See https://hg.python.org/cpython/file/3.5/Lib/logging/__init__.py\"\"\"\n super().__init__(fmt='%(message)s', datefmt='%Y-%m-%d %H:%M:%S', style='%')\n self.append_br = append_br\n self.replace_nl_with_br = replace_nl_with_br\n\n def format(self, record: logging.LogRecord) -> str:\n \"\"\"Internal function to format the :class:`LogRecord` as HTML. See https://docs.python.org/3.4/library/logging.html#logging.LogRecord\"\"\"\n super().format(record)\n record.asctime = self.formatTime(record, self.datefmt)\n bg_col = self.log_background_colors[record.levelno]\n msg = escape(record.getMessage())\n if self.replace_nl_with_br:\n msg = msg.replace('\\n', '
')\n html = '{time}.{ms:03d} {name}:{lvname}: {msg}{br}'.format(time=record.asctime, ms=int(record.msecs), name=record.name, lvname=record.levelname, color=self.log_colors[record.levelno], msg=msg, bg=f';background-color:{bg_col}' if bg_col else '', br='
' if self.append_br else '')\n return html\n", "source": "the_stack_v2_python_sparse", "source_path": "cardinal_pythonlib/logs.py", "source_repo": "RudolfCardinal/pythonlib", "split": "test", "star_events_count": 12} {"blob_id": "563ddd71f908ed0ac6d13cb04963be240135e0dc", "bodies": ["try:\n float(s)\n return True\nexcept ValueError:\n pass\nreturn False", "euros = []\nfor i in range(0, len(prices)):\n price = prices[i]\n if self.is_number(price):\n euro = '€{:.2f}'.format(float(price) * interest)\n euros.append(\"'\" + euro.replace('.', ',') + \"'\")\n else:\n euros.append(price)\n continue\nreturn euros", "dollars = []\nfor i in range(0, len(prices)):\n price = prices[i][2:len(prices[i]) - 1]\n price = price.replace(',', '.')\n if self.is_number(price):\n dollar = '{:.2f}'.format(float(price) / interest)\n dollars.append(dollar)\n else:\n dollars.append(price)\n continue\nreturn dollars"], "bodies_text": "<|body_start_0|>\n try:\n float(s)\n return True\n except ValueError:\n pass\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n euros = []\n for i in range(0, len(prices)):\n price = prices[i]\n if self.is_number(price):\n euro = '€{:.2f}'.format(float(price) * interest)\n euros.append(\"'\" + euro.replace('.', ',') + \"'\")\n else:\n euros.append(price)\n continue\n return euros\n<|end_body_1|>\n\n<|body_start_2|>\n dollars = []\n for i in range(0, len(prices)):\n price = prices[i][2:len(prices[i]) - 1]\n price = price.replace(',', '.')\n if self.is_number(price):\n dollar = '{:.2f}'.format(float(price) / interest)\n dollars.append(dollar)\n else:\n dollars.append(price)\n continue\n return dollars\n<|end_body_2|>\n", "class_docstring": "货币字符串转换类,负责转换货币种类 美元转换为欧元 欧元转换为美元", "class_name": "CurrencyStringConverter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CurrencyStringConverter:\n \"\"\"货币字符串转换类,负责转换货币种类 美元转换为欧元 欧元转换为美元\"\"\"\n\n def is_number(self, s):\n \"\"\"判断字符串是否为数字 :param s: 字符串 :return:\"\"\"\n <|body_0|>\n\n def dollar_to_euro(self, prices, interest):\n \"\"\"美元转换为欧元 实例:74.23*0.8=€59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n <|body_1|>\n\n def euro_to_dollar(self, prices, interest):\n \"\"\"欧元欧元转换为美元 实例:€59,38/0.8=74.23 先取出59,38转换为59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n float(s)\n return True\n except ValueError:\n pass\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n euros = []\n for i in range(0, len(prices)):\n price = prices[i]\n if self.is_number(price):\n euro = '€{:.2f}'.format(float(price) * interest)\n euros.append(\"'\" + euro.replace('.', ',') + \"'\")\n else:\n euros.append(price)\n continue\n return euros\n<|end_body_1|>\n\n<|body_start_2|>\n dollars = []\n for i in range(0, len(prices)):\n price = prices[i][2:len(prices[i]) - 1]\n price = price.replace(',', '.')\n if self.is_number(price):\n dollar = '{:.2f}'.format(float(price) / interest)\n dollars.append(dollar)\n else:\n dollars.append(price)\n continue\n return dollars\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000358", "length_bytes": 4256, "license_type": "no_license", "methods": [{"docstring": "判断字符串是否为数字 :param s: 字符串 :return:", "name": "is_number", "signature": "def is_number(self, s)"}, {"docstring": "美元转换为欧元 实例:74.23*0.8=€59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成", "name": "dollar_to_euro", "signature": "def dollar_to_euro(self, prices, interest)"}, {"docstring": "欧元欧元转换为美元 实例:€59,38/0.8=74.23 先取出59,38转换为59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成", "name": "euro_to_dollar", "signature": "def euro_to_dollar(self, prices, interest)"}], "n_methods": 3, "prompt": "Implement the Python class `CurrencyStringConverter` described below.\n\nClass description:\n货币字符串转换类,负责转换货币种类 美元转换为欧元 欧元转换为美元\n\nMethod signatures and docstrings:\n- def is_number(self, s): 判断字符串是否为数字 :param s: 字符串 :return:\n- def dollar_to_euro(self, prices, interest): 美元转换为欧元 实例:74.23*0.8=€59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\n- def euro_to_dollar(self, prices, interest): 欧元欧元转换为美元 实例:€59,38/0.8=74.23 先取出59,38转换为59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成", "prompted_full_text": "Implement the Python class `CurrencyStringConverter` described below.\n\nClass description:\n货币字符串转换类,负责转换货币种类 美元转换为欧元 欧元转换为美元\n\nMethod signatures and docstrings:\n- def is_number(self, s): 判断字符串是否为数字 :param s: 字符串 :return:\n- def dollar_to_euro(self, prices, interest): 美元转换为欧元 实例:74.23*0.8=€59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\n- def euro_to_dollar(self, prices, interest): 欧元欧元转换为美元 实例:€59,38/0.8=74.23 先取出59,38转换为59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\n\n<|skeleton|>\nclass CurrencyStringConverter:\n \"\"\"货币字符串转换类,负责转换货币种类 美元转换为欧元 欧元转换为美元\"\"\"\n\n def is_number(self, s):\n \"\"\"判断字符串是否为数字 :param s: 字符串 :return:\"\"\"\n <|body_0|>\n\n def dollar_to_euro(self, prices, interest):\n \"\"\"美元转换为欧元 实例:74.23*0.8=€59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n <|body_1|>\n\n def euro_to_dollar(self, prices, interest):\n \"\"\"欧元欧元转换为美元 实例:€59,38/0.8=74.23 先取出59,38转换为59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n float(s)\n return True\n except ValueError:\n pass\n return False\n<|end_body_0|>\n\n<|body_start_1|>\n euros = []\n for i in range(0, len(prices)):\n price = prices[i]\n if self.is_number(price):\n euro = '€{:.2f}'.format(float(price) * interest)\n euros.append(\"'\" + euro.replace('.', ',') + \"'\")\n else:\n euros.append(price)\n continue\n return euros\n<|end_body_1|>\n\n<|body_start_2|>\n dollars = []\n for i in range(0, len(prices)):\n price = prices[i][2:len(prices[i]) - 1]\n price = price.replace(',', '.')\n if self.is_number(price):\n dollar = '{:.2f}'.format(float(price) / interest)\n dollars.append(dollar)\n else:\n dollars.append(price)\n continue\n return dollars\n<|end_body_2|>\n", "revision_id": "9f25ac037db9c2c619862dbe0240b66b35b8f239", "skeleton": "<|skeleton|>\nclass CurrencyStringConverter:\n \"\"\"货币字符串转换类,负责转换货币种类 美元转换为欧元 欧元转换为美元\"\"\"\n\n def is_number(self, s):\n \"\"\"判断字符串是否为数字 :param s: 字符串 :return:\"\"\"\n <|body_0|>\n\n def dollar_to_euro(self, prices, interest):\n \"\"\"美元转换为欧元 实例:74.23*0.8=€59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n <|body_1|>\n\n def euro_to_dollar(self, prices, interest):\n \"\"\"欧元欧元转换为美元 实例:€59,38/0.8=74.23 先取出59,38转换为59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CurrencyStringConverter:\n \"\"\"货币字符串转换类,负责转换货币种类 美元转换为欧元 欧元转换为美元\"\"\"\n\n def is_number(self, s):\n \"\"\"判断字符串是否为数字 :param s: 字符串 :return:\"\"\"\n try:\n float(s)\n return True\n except ValueError:\n pass\n return False\n\n def dollar_to_euro(self, prices, interest):\n \"\"\"美元转换为欧元 实例:74.23*0.8=€59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n euros = []\n for i in range(0, len(prices)):\n price = prices[i]\n if self.is_number(price):\n euro = '€{:.2f}'.format(float(price) * interest)\n euros.append(\"'\" + euro.replace('.', ',') + \"'\")\n else:\n euros.append(price)\n continue\n return euros\n\n def euro_to_dollar(self, prices, interest):\n \"\"\"欧元欧元转换为美元 实例:€59,38/0.8=74.23 先取出59,38转换为59.38 保留小数点后两位 :param prices:价钱列表 由字符串组成,判断元素是否为数字 :param interest:利率 :return:列表 有字符串组成\"\"\"\n dollars = []\n for i in range(0, len(prices)):\n price = prices[i][2:len(prices[i]) - 1]\n price = price.replace(',', '.')\n if self.is_number(price):\n dollar = '{:.2f}'.format(float(price) / interest)\n dollars.append(dollar)\n else:\n dollars.append(price)\n continue\n return dollars\n", "source": "the_stack_v2_python_sparse", "source_path": "13/currency_string_converter.py", "source_repo": "NancyHui/ZSXQ", "split": "test", "star_events_count": 0} {"blob_id": "63bcb3ce22cc1b38adcdc930f6b91f3f90f2aa10", "bodies": ["sjFile = 'input_files/test_junctions.txt'\ntmp_dir = 'scratch/test_jns/TC_tmp/'\nchroms = set(['chr1'])\ndonors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\ngenome = Fasta('input_files/hg38_chr1.fa')\nsam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:6']\ntranscript = t2.Transcript(sam_fields, genome, sjAnnot)\njnNumber = 0\nmaxDist = 1\ncorrection_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\nassert correction_status == False\nassert reason == 'TooFarFromAnnotJn'\nassert dist == 2", "sjFile = 'input_files/test_junctions.txt'\noutprefix = 'scratch/test_jns/'\ntmp_dir = 'scratch/test_jns/TC_tmp/'\nchroms = set(['chr1'])\ndonors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\ngenome = Fasta('input_files/hg38_chr1.fa')\nsam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:4']\ntranscript = t2.Transcript(sam_fields, genome, sjAnnot)\njnNumber = 0\nmaxDist = 5\ncorrection_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\nassert correction_status == True\nassert reason == 'NA'\nassert dist == 2", "sjFile = 'input_files/drosophila_example/chr3R_SJs.tsv'\noutprefix = 'scratch/dmel_crash/'\ntmp_dir = 'scratch/dmel_crash/TC_tmp/'\nchroms = set(['chr3R'])\ndonors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\ngenome = Fasta('input_files/drosophila_example/chr3R.fa')\nsam_fields = ['test_read', '0', 'chr3R', '14890420', '255', '7M7D2M264N7M', '*', '0', '0', 'GATCAAACAACAAGTC', '*']\ntranscript = t2.Transcript(sam_fields, genome, sjAnnot)\njnNumber = 0\nmaxDist = 5\ncorrection_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\nassert correction_status == False\nassert reason == 'Other'\nassert dist == 5"], "bodies_text": "<|body_start_0|>\n sjFile = 'input_files/test_junctions.txt'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:6']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 1\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'TooFarFromAnnotJn'\n assert dist == 2\n<|end_body_0|>\n\n<|body_start_1|>\n sjFile = 'input_files/test_junctions.txt'\n outprefix = 'scratch/test_jns/'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:4']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == True\n assert reason == 'NA'\n assert dist == 2\n<|end_body_1|>\n\n<|body_start_2|>\n sjFile = 'input_files/drosophila_example/chr3R_SJs.tsv'\n outprefix = 'scratch/dmel_crash/'\n tmp_dir = 'scratch/dmel_crash/TC_tmp/'\n chroms = set(['chr3R'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/drosophila_example/chr3R.fa')\n sam_fields = ['test_read', '0', 'chr3R', '14890420', '255', '7M7D2M264N7M', '*', '0', '0', 'GATCAAACAACAAGTC', '*']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'Other'\n assert dist == 5\n<|end_body_2|>\n", "class_docstring": "", "class_name": "TestAttemptJnCorrection", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestAttemptJnCorrection:\n\n def test_too_far_away(self):\n \"\"\"A case where the NCSJ should not be corrected because it is too far away from the closest annotated junction relative to the maxDist parameter. Toy transcript with sequence A|GAA, where the splice motif is noncanonical. chr1: 23,071,357 - 23,072,126\"\"\"\n <|body_0|>\n\n def test_correct_jn(self):\n \"\"\"Toy transcript with sequence A|GAA, where the splice motif is noncanonical but located 2 bp from a canonical splice donor. chr1: 23,071,357 - 23,072,126\"\"\"\n <|body_1|>\n\n def test_crash(self):\n \"\"\"This is a Drosophila junction that borders a small match preceded by a 7 bp deletion. It is supposed to crash correction, which will result in a categorization of 'Other' in the log\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sjFile = 'input_files/test_junctions.txt'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:6']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 1\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'TooFarFromAnnotJn'\n assert dist == 2\n<|end_body_0|>\n\n<|body_start_1|>\n sjFile = 'input_files/test_junctions.txt'\n outprefix = 'scratch/test_jns/'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:4']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == True\n assert reason == 'NA'\n assert dist == 2\n<|end_body_1|>\n\n<|body_start_2|>\n sjFile = 'input_files/drosophila_example/chr3R_SJs.tsv'\n outprefix = 'scratch/dmel_crash/'\n tmp_dir = 'scratch/dmel_crash/TC_tmp/'\n chroms = set(['chr3R'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/drosophila_example/chr3R.fa')\n sam_fields = ['test_read', '0', 'chr3R', '14890420', '255', '7M7D2M264N7M', '*', '0', '0', 'GATCAAACAACAAGTC', '*']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'Other'\n assert dist == 5\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000359", "length_bytes": 5278, "license_type": "permissive", "methods": [{"docstring": "A case where the NCSJ should not be corrected because it is too far away from the closest annotated junction relative to the maxDist parameter. Toy transcript with sequence A|GAA, where the splice motif is noncanonical. chr1: 23,071,357 - 23,072,126", "name": "test_too_far_away", "signature": "def test_too_far_away(self)"}, {"docstring": "Toy transcript with sequence A|GAA, where the splice motif is noncanonical but located 2 bp from a canonical splice donor. chr1: 23,071,357 - 23,072,126", "name": "test_correct_jn", "signature": "def test_correct_jn(self)"}, {"docstring": "This is a Drosophila junction that borders a small match preceded by a 7 bp deletion. It is supposed to crash correction, which will result in a categorization of 'Other' in the log", "name": "test_crash", "signature": "def test_crash(self)"}], "n_methods": 3, "prompt": "Implement the Python class `TestAttemptJnCorrection` described below.\n\nClass description:\nImplement the TestAttemptJnCorrection class.\n\nMethod signatures and docstrings:\n- def test_too_far_away(self): A case where the NCSJ should not be corrected because it is too far away from the closest annotated junction relative to the maxDist parameter. Toy transcript with sequence A|GAA, where the splice motif is noncanonical. chr1: 23,071,357 - 23,072,126\n- def test_correct_jn(self): Toy transcript with sequence A|GAA, where the splice motif is noncanonical but located 2 bp from a canonical splice donor. chr1: 23,071,357 - 23,072,126\n- def test_crash(self): This is a Drosophila junction that borders a small match preceded by a 7 bp deletion. It is supposed to crash correction, which will result in a categorization of 'Other' in the log", "prompted_full_text": "Implement the Python class `TestAttemptJnCorrection` described below.\n\nClass description:\nImplement the TestAttemptJnCorrection class.\n\nMethod signatures and docstrings:\n- def test_too_far_away(self): A case where the NCSJ should not be corrected because it is too far away from the closest annotated junction relative to the maxDist parameter. Toy transcript with sequence A|GAA, where the splice motif is noncanonical. chr1: 23,071,357 - 23,072,126\n- def test_correct_jn(self): Toy transcript with sequence A|GAA, where the splice motif is noncanonical but located 2 bp from a canonical splice donor. chr1: 23,071,357 - 23,072,126\n- def test_crash(self): This is a Drosophila junction that borders a small match preceded by a 7 bp deletion. It is supposed to crash correction, which will result in a categorization of 'Other' in the log\n\n<|skeleton|>\nclass TestAttemptJnCorrection:\n\n def test_too_far_away(self):\n \"\"\"A case where the NCSJ should not be corrected because it is too far away from the closest annotated junction relative to the maxDist parameter. Toy transcript with sequence A|GAA, where the splice motif is noncanonical. chr1: 23,071,357 - 23,072,126\"\"\"\n <|body_0|>\n\n def test_correct_jn(self):\n \"\"\"Toy transcript with sequence A|GAA, where the splice motif is noncanonical but located 2 bp from a canonical splice donor. chr1: 23,071,357 - 23,072,126\"\"\"\n <|body_1|>\n\n def test_crash(self):\n \"\"\"This is a Drosophila junction that borders a small match preceded by a 7 bp deletion. It is supposed to crash correction, which will result in a categorization of 'Other' in the log\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n sjFile = 'input_files/test_junctions.txt'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:6']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 1\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'TooFarFromAnnotJn'\n assert dist == 2\n<|end_body_0|>\n\n<|body_start_1|>\n sjFile = 'input_files/test_junctions.txt'\n outprefix = 'scratch/test_jns/'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:4']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == True\n assert reason == 'NA'\n assert dist == 2\n<|end_body_1|>\n\n<|body_start_2|>\n sjFile = 'input_files/drosophila_example/chr3R_SJs.tsv'\n outprefix = 'scratch/dmel_crash/'\n tmp_dir = 'scratch/dmel_crash/TC_tmp/'\n chroms = set(['chr3R'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/drosophila_example/chr3R.fa')\n sam_fields = ['test_read', '0', 'chr3R', '14890420', '255', '7M7D2M264N7M', '*', '0', '0', 'GATCAAACAACAAGTC', '*']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'Other'\n assert dist == 5\n<|end_body_2|>\n", "revision_id": "ae9e71556a6b2e2c1fd80a9dca8fbab8d8196206", "skeleton": "<|skeleton|>\nclass TestAttemptJnCorrection:\n\n def test_too_far_away(self):\n \"\"\"A case where the NCSJ should not be corrected because it is too far away from the closest annotated junction relative to the maxDist parameter. Toy transcript with sequence A|GAA, where the splice motif is noncanonical. chr1: 23,071,357 - 23,072,126\"\"\"\n <|body_0|>\n\n def test_correct_jn(self):\n \"\"\"Toy transcript with sequence A|GAA, where the splice motif is noncanonical but located 2 bp from a canonical splice donor. chr1: 23,071,357 - 23,072,126\"\"\"\n <|body_1|>\n\n def test_crash(self):\n \"\"\"This is a Drosophila junction that borders a small match preceded by a 7 bp deletion. It is supposed to crash correction, which will result in a categorization of 'Other' in the log\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestAttemptJnCorrection:\n def test_too_far_away(self):\n \"\"\"A case where the NCSJ should not be corrected because it is too far away from the closest annotated junction relative to the maxDist parameter. Toy transcript with sequence A|GAA, where the splice motif is noncanonical. chr1: 23,071,357 - 23,072,126\"\"\"\n sjFile = 'input_files/test_junctions.txt'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:6']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 1\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'TooFarFromAnnotJn'\n assert dist == 2\n\n def test_correct_jn(self):\n \"\"\"Toy transcript with sequence A|GAA, where the splice motif is noncanonical but located 2 bp from a canonical splice donor. chr1: 23,071,357 - 23,072,126\"\"\"\n sjFile = 'input_files/test_junctions.txt'\n outprefix = 'scratch/test_jns/'\n tmp_dir = 'scratch/test_jns/TC_tmp/'\n chroms = set(['chr1'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/hg38_chr1.fa')\n sam_fields = ['test_read', '0', 'chr1', '23071357', '255', '1M766N3M', '*', '0', '0', 'AGAA', '*', 'NM:i:0', 'MD:Z:4']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == True\n assert reason == 'NA'\n assert dist == 2\n\n def test_crash(self):\n \"\"\"This is a Drosophila junction that borders a small match preceded by a 7 bp deletion. It is supposed to crash correction, which will result in a categorization of 'Other' in the log\"\"\"\n sjFile = 'input_files/drosophila_example/chr3R_SJs.tsv'\n outprefix = 'scratch/dmel_crash/'\n tmp_dir = 'scratch/dmel_crash/TC_tmp/'\n chroms = set(['chr3R'])\n donors, acceptors, sjAnnot = TC.processSpliceAnnotation(sjFile, tmp_dir, chroms)\n genome = Fasta('input_files/drosophila_example/chr3R.fa')\n sam_fields = ['test_read', '0', 'chr3R', '14890420', '255', '7M7D2M264N7M', '*', '0', '0', 'GATCAAACAACAAGTC', '*']\n transcript = t2.Transcript(sam_fields, genome, sjAnnot)\n jnNumber = 0\n maxDist = 5\n correction_status, reason, dist = TC.attempt_jn_correction(transcript, jnNumber, genome, donors, acceptors, sjAnnot, maxDist)\n assert correction_status == False\n assert reason == 'Other'\n assert dist == 5\n", "source": "the_stack_v2_python_sparse", "source_path": "testing_suite/test_attempt_jn_correction.py", "source_repo": "hzongyao/TranscriptClean", "split": "test", "star_events_count": 0} {"blob_id": "f79d1dcdeadbefb151237efbe82b5697abd04e30", "bodies": ["try:\n self.sqlhandler = None\n self.practice = list()\n self.classId = self.get_argument('classId')\n self.practiceId = self.get_body_argument('practiceId')\n if self.getStuPractice():\n print(self.practicelist)\n self.write({self.practicelist})\n self.finish()\n else:\n raise RuntimeError\nexcept Exception:\n self.write('error')\n self.finish()\nfinally:\n if self.sqlhandler is not None:\n self.sqlhandler.closeMySql()\n tornado.ioloop.IOLoop.current().stop()", "self.sqlhandler = SqlHandler.SqlHandler(Host='139.159.176.78', User='root', Password='liyuhang8', DBName='PersonDatabase')\nif self.sqlhandler.getConnection():\n '\\n 查询练习题\\n '\n sql = \"select Student from CLASS where ClassId='\" + self.classId + \"'\"\n stuIdList = str(self.sqlhandler.executeQuerySQL(sql)[0]['Student']).split(',')\n for stuId in stuIdList:\n sql = \"select * from SCORE where PracticeId='{0}' and StuId='{1}'\".format(self.practiceId, stuId)\n print(sql)\n if len(self.sqlhandler.executeQuerySQL(sql)) == 1:\n isDone = True\n else:\n isDone = False\n sql = \"select StuName from StuPersonInfo where StuId='{0}''\".format(stuId)\n stuName = self.sqlhandler.executeQuerySQL(sql)[0]['StuName']\n self.practicelist.append({'stuId': stuId, 'stuName': stuName, 'isDone': isDone})\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n try:\n self.sqlhandler = None\n self.practice = list()\n self.classId = self.get_argument('classId')\n self.practiceId = self.get_body_argument('practiceId')\n if self.getStuPractice():\n print(self.practicelist)\n self.write({self.practicelist})\n self.finish()\n else:\n raise RuntimeError\n except Exception:\n self.write('error')\n self.finish()\n finally:\n if self.sqlhandler is not None:\n self.sqlhandler.closeMySql()\n tornado.ioloop.IOLoop.current().stop()\n<|end_body_0|>\n\n<|body_start_1|>\n self.sqlhandler = SqlHandler.SqlHandler(Host='139.159.176.78', User='root', Password='liyuhang8', DBName='PersonDatabase')\n if self.sqlhandler.getConnection():\n '\\n 查询练习题\\n '\n sql = \"select Student from CLASS where ClassId='\" + self.classId + \"'\"\n stuIdList = str(self.sqlhandler.executeQuerySQL(sql)[0]['Student']).split(',')\n for stuId in stuIdList:\n sql = \"select * from SCORE where PracticeId='{0}' and StuId='{1}'\".format(self.practiceId, stuId)\n print(sql)\n if len(self.sqlhandler.executeQuerySQL(sql)) == 1:\n isDone = True\n else:\n isDone = False\n sql = \"select StuName from StuPersonInfo where StuId='{0}''\".format(stuId)\n stuName = self.sqlhandler.executeQuerySQL(sql)[0]['StuName']\n self.practicelist.append({'stuId': stuId, 'stuName': stuName, 'isDone': isDone})\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TeaGetPracticeRequestHandler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TeaGetPracticeRequestHandler:\n\n def get(self):\n \"\"\"获取练习题列表,返回给老师客户端\"\"\"\n <|body_0|>\n\n def getStuPractice(self):\n \"\"\"返回班级本次所有学生习题列表\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.sqlhandler = None\n self.practice = list()\n self.classId = self.get_argument('classId')\n self.practiceId = self.get_body_argument('practiceId')\n if self.getStuPractice():\n print(self.practicelist)\n self.write({self.practicelist})\n self.finish()\n else:\n raise RuntimeError\n except Exception:\n self.write('error')\n self.finish()\n finally:\n if self.sqlhandler is not None:\n self.sqlhandler.closeMySql()\n tornado.ioloop.IOLoop.current().stop()\n<|end_body_0|>\n\n<|body_start_1|>\n self.sqlhandler = SqlHandler.SqlHandler(Host='139.159.176.78', User='root', Password='liyuhang8', DBName='PersonDatabase')\n if self.sqlhandler.getConnection():\n '\\n 查询练习题\\n '\n sql = \"select Student from CLASS where ClassId='\" + self.classId + \"'\"\n stuIdList = str(self.sqlhandler.executeQuerySQL(sql)[0]['Student']).split(',')\n for stuId in stuIdList:\n sql = \"select * from SCORE where PracticeId='{0}' and StuId='{1}'\".format(self.practiceId, stuId)\n print(sql)\n if len(self.sqlhandler.executeQuerySQL(sql)) == 1:\n isDone = True\n else:\n isDone = False\n sql = \"select StuName from StuPersonInfo where StuId='{0}''\".format(stuId)\n stuName = self.sqlhandler.executeQuerySQL(sql)[0]['StuName']\n self.practicelist.append({'stuId': stuId, 'stuName': stuName, 'isDone': isDone})\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000360", "length_bytes": 2685, "license_type": "no_license", "methods": [{"docstring": "获取练习题列表,返回给老师客户端", "name": "get", "signature": "def get(self)"}, {"docstring": "返回班级本次所有学生习题列表", "name": "getStuPractice", "signature": "def getStuPractice(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_021174", "prompt": "Implement the Python class `TeaGetPracticeRequestHandler` described below.\n\nClass description:\nImplement the TeaGetPracticeRequestHandler class.\n\nMethod signatures and docstrings:\n- def get(self): 获取练习题列表,返回给老师客户端\n- def getStuPractice(self): 返回班级本次所有学生习题列表", "prompted_full_text": "Implement the Python class `TeaGetPracticeRequestHandler` described below.\n\nClass description:\nImplement the TeaGetPracticeRequestHandler class.\n\nMethod signatures and docstrings:\n- def get(self): 获取练习题列表,返回给老师客户端\n- def getStuPractice(self): 返回班级本次所有学生习题列表\n\n<|skeleton|>\nclass TeaGetPracticeRequestHandler:\n\n def get(self):\n \"\"\"获取练习题列表,返回给老师客户端\"\"\"\n <|body_0|>\n\n def getStuPractice(self):\n \"\"\"返回班级本次所有学生习题列表\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n self.sqlhandler = None\n self.practice = list()\n self.classId = self.get_argument('classId')\n self.practiceId = self.get_body_argument('practiceId')\n if self.getStuPractice():\n print(self.practicelist)\n self.write({self.practicelist})\n self.finish()\n else:\n raise RuntimeError\n except Exception:\n self.write('error')\n self.finish()\n finally:\n if self.sqlhandler is not None:\n self.sqlhandler.closeMySql()\n tornado.ioloop.IOLoop.current().stop()\n<|end_body_0|>\n\n<|body_start_1|>\n self.sqlhandler = SqlHandler.SqlHandler(Host='139.159.176.78', User='root', Password='liyuhang8', DBName='PersonDatabase')\n if self.sqlhandler.getConnection():\n '\\n 查询练习题\\n '\n sql = \"select Student from CLASS where ClassId='\" + self.classId + \"'\"\n stuIdList = str(self.sqlhandler.executeQuerySQL(sql)[0]['Student']).split(',')\n for stuId in stuIdList:\n sql = \"select * from SCORE where PracticeId='{0}' and StuId='{1}'\".format(self.practiceId, stuId)\n print(sql)\n if len(self.sqlhandler.executeQuerySQL(sql)) == 1:\n isDone = True\n else:\n isDone = False\n sql = \"select StuName from StuPersonInfo where StuId='{0}''\".format(stuId)\n stuName = self.sqlhandler.executeQuerySQL(sql)[0]['StuName']\n self.practicelist.append({'stuId': stuId, 'stuName': stuName, 'isDone': isDone})\n return True\n return False\n<|end_body_1|>\n", "revision_id": "b28eb4163b02bd0a931653b94851592f2654b199", "skeleton": "<|skeleton|>\nclass TeaGetPracticeRequestHandler:\n\n def get(self):\n \"\"\"获取练习题列表,返回给老师客户端\"\"\"\n <|body_0|>\n\n def getStuPractice(self):\n \"\"\"返回班级本次所有学生习题列表\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TeaGetPracticeRequestHandler:\n def get(self):\n \"\"\"获取练习题列表,返回给老师客户端\"\"\"\n try:\n self.sqlhandler = None\n self.practice = list()\n self.classId = self.get_argument('classId')\n self.practiceId = self.get_body_argument('practiceId')\n if self.getStuPractice():\n print(self.practicelist)\n self.write({self.practicelist})\n self.finish()\n else:\n raise RuntimeError\n except Exception:\n self.write('error')\n self.finish()\n finally:\n if self.sqlhandler is not None:\n self.sqlhandler.closeMySql()\n tornado.ioloop.IOLoop.current().stop()\n\n def getStuPractice(self):\n \"\"\"返回班级本次所有学生习题列表\"\"\"\n self.sqlhandler = SqlHandler.SqlHandler(Host='139.159.176.78', User='root', Password='liyuhang8', DBName='PersonDatabase')\n if self.sqlhandler.getConnection():\n '\\n 查询练习题\\n '\n sql = \"select Student from CLASS where ClassId='\" + self.classId + \"'\"\n stuIdList = str(self.sqlhandler.executeQuerySQL(sql)[0]['Student']).split(',')\n for stuId in stuIdList:\n sql = \"select * from SCORE where PracticeId='{0}' and StuId='{1}'\".format(self.practiceId, stuId)\n print(sql)\n if len(self.sqlhandler.executeQuerySQL(sql)) == 1:\n isDone = True\n else:\n isDone = False\n sql = \"select StuName from StuPersonInfo where StuId='{0}''\".format(stuId)\n stuName = self.sqlhandler.executeQuerySQL(sql)[0]['StuName']\n self.practicelist.append({'stuId': stuId, 'stuName': stuName, 'isDone': isDone})\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "app/src/main/pythonWork/TeaGetPracticeRequestHandler.py", "source_repo": "lyh-ADT/edu-app", "split": "test", "star_events_count": 1} {"blob_id": "c8d644bc0ff209c8f31ce22d37c9588f6c40fc47", "bodies": ["if not root:\n return 0\nif not root.left:\n return self.minDepth(root.right) + 1\nif not root.right:\n return self.minDepth(root.left) + 1\nleft = self.minDepth(root.left)\nright = self.minDepth(root.right)\nreturn min(left, right) + 1", "if not root:\n return 0\nqueue = [root]\nmin_result = 0\nwhile queue:\n queue_len = len(queue)\n min_result += 1\n for index in range(queue_len):\n node = queue.pop(0)\n if not node.left and (not node.right):\n return min_result\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)"], "bodies_text": "<|body_start_0|>\n if not root:\n return 0\n if not root.left:\n return self.minDepth(root.right) + 1\n if not root.right:\n return self.minDepth(root.left) + 1\n left = self.minDepth(root.left)\n right = self.minDepth(root.right)\n return min(left, right) + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n queue = [root]\n min_result = 0\n while queue:\n queue_len = len(queue)\n min_result += 1\n for index in range(queue_len):\n node = queue.pop(0)\n if not node.left and (not node.right):\n return min_result\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"DFS 递归解法 Args: root: Returns:\"\"\"\n <|body_0|>\n\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"BFS 解法 Args: root: Returns:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return 0\n if not root.left:\n return self.minDepth(root.right) + 1\n if not root.right:\n return self.minDepth(root.left) + 1\n left = self.minDepth(root.left)\n right = self.minDepth(root.right)\n return min(left, right) + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n queue = [root]\n min_result = 0\n while queue:\n queue_len = len(queue)\n min_result += 1\n for index in range(queue_len):\n node = queue.pop(0)\n if not node.left and (not node.right):\n return min_result\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000361", "length_bytes": 1801, "license_type": "no_license", "methods": [{"docstring": "DFS 递归解法 Args: root: Returns:", "name": "minDepth", "signature": "def minDepth(self, root: TreeNode) -> int"}, {"docstring": "BFS 解法 Args: root: Returns:", "name": "minDepth", "signature": "def minDepth(self, root: TreeNode) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049009", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDepth(self, root: TreeNode) -> int: DFS 递归解法 Args: root: Returns:\n- def minDepth(self, root: TreeNode) -> int: BFS 解法 Args: root: Returns:", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minDepth(self, root: TreeNode) -> int: DFS 递归解法 Args: root: Returns:\n- def minDepth(self, root: TreeNode) -> int: BFS 解法 Args: root: Returns:\n\n<|skeleton|>\nclass Solution:\n\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"DFS 递归解法 Args: root: Returns:\"\"\"\n <|body_0|>\n\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"BFS 解法 Args: root: Returns:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return 0\n if not root.left:\n return self.minDepth(root.right) + 1\n if not root.right:\n return self.minDepth(root.left) + 1\n left = self.minDepth(root.left)\n right = self.minDepth(root.right)\n return min(left, right) + 1\n<|end_body_0|>\n\n<|body_start_1|>\n if not root:\n return 0\n queue = [root]\n min_result = 0\n while queue:\n queue_len = len(queue)\n min_result += 1\n for index in range(queue_len):\n node = queue.pop(0)\n if not node.left and (not node.right):\n return min_result\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n<|end_body_1|>\n", "revision_id": "c0dd577481b46129d950354d567d332a4d091137", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"DFS 递归解法 Args: root: Returns:\"\"\"\n <|body_0|>\n\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"BFS 解法 Args: root: Returns:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"DFS 递归解法 Args: root: Returns:\"\"\"\n if not root:\n return 0\n if not root.left:\n return self.minDepth(root.right) + 1\n if not root.right:\n return self.minDepth(root.left) + 1\n left = self.minDepth(root.left)\n right = self.minDepth(root.right)\n return min(left, right) + 1\n\n def minDepth(self, root: TreeNode) -> int:\n \"\"\"BFS 解法 Args: root: Returns:\"\"\"\n if not root:\n return 0\n queue = [root]\n min_result = 0\n while queue:\n queue_len = len(queue)\n min_result += 1\n for index in range(queue_len):\n node = queue.pop(0)\n if not node.left and (not node.right):\n return min_result\n if node.left:\n queue.append(node.left)\n if node.right:\n queue.append(node.right)\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/111_二叉树的最小深度.py", "source_repo": "tenqaz/crazy_arithmetic", "split": "test", "star_events_count": 0} {"blob_id": "ad53b299e012e1048c65adf1dd9185dd0e26e88e", "bodies": ["self.df = df\nself.var = var\nself.var_name = var_name\nself.y = np.array(df[var].values.flatten())\nself.times = df.index\nself.pardict = init_pars", "F = np.zeros(len(self.y))\na = np.zeros(len(self.y))\nv = np.zeros(len(self.y))\nP = np.zeros(len(self.y))\nP[0] = self.pardict['P1']\na[0] = self.y[0]\nsigma_eps2 = self.pardict['sigma_eps2']\nsigma_eta2 = self.pardict['sigma_eta2']\nfor t in range(0, len(self.y) - 1):\n F[t] = P[t] + sigma_eps2\n Kt = P[t] / F[t] if np.isfinite(self.y[t]) else 0\n v[t] = self.y[t] - a[t]\n a[t + 1] = a[t] + np.nan_to_num(Kt * v[t])\n F[t] = P[t] + sigma_eps2\n P[t + 1] = P[t] * (1 - Kt) + sigma_eta2\nF[-1] = P[-1] + sigma_eps2\nv[-1] = self.y[-1] - a[-1]\nstd = np.sqrt(P * sigma_eps2 / (P + sigma_eps2))\nif plot:\n fig_name = self.var_name + 'Fig26.pdf'\n plot_fig2_6(self.times, self.y, std, P, a, F, fig_name, self.var_name)\nreturn (a, std, P, v, F)"], "bodies_text": "<|body_start_0|>\n self.df = df\n self.var = var\n self.var_name = var_name\n self.y = np.array(df[var].values.flatten())\n self.times = df.index\n self.pardict = init_pars\n<|end_body_0|>\n\n<|body_start_1|>\n F = np.zeros(len(self.y))\n a = np.zeros(len(self.y))\n v = np.zeros(len(self.y))\n P = np.zeros(len(self.y))\n P[0] = self.pardict['P1']\n a[0] = self.y[0]\n sigma_eps2 = self.pardict['sigma_eps2']\n sigma_eta2 = self.pardict['sigma_eta2']\n for t in range(0, len(self.y) - 1):\n F[t] = P[t] + sigma_eps2\n Kt = P[t] / F[t] if np.isfinite(self.y[t]) else 0\n v[t] = self.y[t] - a[t]\n a[t + 1] = a[t] + np.nan_to_num(Kt * v[t])\n F[t] = P[t] + sigma_eps2\n P[t + 1] = P[t] * (1 - Kt) + sigma_eta2\n F[-1] = P[-1] + sigma_eps2\n v[-1] = self.y[-1] - a[-1]\n std = np.sqrt(P * sigma_eps2 / (P + sigma_eps2))\n if plot:\n fig_name = self.var_name + 'Fig26.pdf'\n plot_fig2_6(self.times, self.y, std, P, a, F, fig_name, self.var_name)\n return (a, std, P, v, F)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "KFpredictor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass KFpredictor:\n\n def __init__(self, df, init_pars, var='dep_var', var_name='Volume of Nile'):\n \"\"\"Initialisation, where df is a pandas DataFrame and var is the name of the column to study and init_pars is a dictionary with initial values\"\"\"\n <|body_0|>\n\n def iterate(self, plot=True):\n \"\"\"Iterate over the observations and update the filtered values after each iteration\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.df = df\n self.var = var\n self.var_name = var_name\n self.y = np.array(df[var].values.flatten())\n self.times = df.index\n self.pardict = init_pars\n<|end_body_0|>\n\n<|body_start_1|>\n F = np.zeros(len(self.y))\n a = np.zeros(len(self.y))\n v = np.zeros(len(self.y))\n P = np.zeros(len(self.y))\n P[0] = self.pardict['P1']\n a[0] = self.y[0]\n sigma_eps2 = self.pardict['sigma_eps2']\n sigma_eta2 = self.pardict['sigma_eta2']\n for t in range(0, len(self.y) - 1):\n F[t] = P[t] + sigma_eps2\n Kt = P[t] / F[t] if np.isfinite(self.y[t]) else 0\n v[t] = self.y[t] - a[t]\n a[t + 1] = a[t] + np.nan_to_num(Kt * v[t])\n F[t] = P[t] + sigma_eps2\n P[t + 1] = P[t] * (1 - Kt) + sigma_eta2\n F[-1] = P[-1] + sigma_eps2\n v[-1] = self.y[-1] - a[-1]\n std = np.sqrt(P * sigma_eps2 / (P + sigma_eps2))\n if plot:\n fig_name = self.var_name + 'Fig26.pdf'\n plot_fig2_6(self.times, self.y, std, P, a, F, fig_name, self.var_name)\n return (a, std, P, v, F)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000362", "length_bytes": 1814, "license_type": "no_license", "methods": [{"docstring": "Initialisation, where df is a pandas DataFrame and var is the name of the column to study and init_pars is a dictionary with initial values", "name": "__init__", "signature": "def __init__(self, df, init_pars, var='dep_var', var_name='Volume of Nile')"}, {"docstring": "Iterate over the observations and update the filtered values after each iteration", "name": "iterate", "signature": "def iterate(self, plot=True)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020032", "prompt": "Implement the Python class `KFpredictor` described below.\n\nClass description:\nImplement the KFpredictor class.\n\nMethod signatures and docstrings:\n- def __init__(self, df, init_pars, var='dep_var', var_name='Volume of Nile'): Initialisation, where df is a pandas DataFrame and var is the name of the column to study and init_pars is a dictionary with initial values\n- def iterate(self, plot=True): Iterate over the observations and update the filtered values after each iteration", "prompted_full_text": "Implement the Python class `KFpredictor` described below.\n\nClass description:\nImplement the KFpredictor class.\n\nMethod signatures and docstrings:\n- def __init__(self, df, init_pars, var='dep_var', var_name='Volume of Nile'): Initialisation, where df is a pandas DataFrame and var is the name of the column to study and init_pars is a dictionary with initial values\n- def iterate(self, plot=True): Iterate over the observations and update the filtered values after each iteration\n\n<|skeleton|>\nclass KFpredictor:\n\n def __init__(self, df, init_pars, var='dep_var', var_name='Volume of Nile'):\n \"\"\"Initialisation, where df is a pandas DataFrame and var is the name of the column to study and init_pars is a dictionary with initial values\"\"\"\n <|body_0|>\n\n def iterate(self, plot=True):\n \"\"\"Iterate over the observations and update the filtered values after each iteration\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.df = df\n self.var = var\n self.var_name = var_name\n self.y = np.array(df[var].values.flatten())\n self.times = df.index\n self.pardict = init_pars\n<|end_body_0|>\n\n<|body_start_1|>\n F = np.zeros(len(self.y))\n a = np.zeros(len(self.y))\n v = np.zeros(len(self.y))\n P = np.zeros(len(self.y))\n P[0] = self.pardict['P1']\n a[0] = self.y[0]\n sigma_eps2 = self.pardict['sigma_eps2']\n sigma_eta2 = self.pardict['sigma_eta2']\n for t in range(0, len(self.y) - 1):\n F[t] = P[t] + sigma_eps2\n Kt = P[t] / F[t] if np.isfinite(self.y[t]) else 0\n v[t] = self.y[t] - a[t]\n a[t + 1] = a[t] + np.nan_to_num(Kt * v[t])\n F[t] = P[t] + sigma_eps2\n P[t + 1] = P[t] * (1 - Kt) + sigma_eta2\n F[-1] = P[-1] + sigma_eps2\n v[-1] = self.y[-1] - a[-1]\n std = np.sqrt(P * sigma_eps2 / (P + sigma_eps2))\n if plot:\n fig_name = self.var_name + 'Fig26.pdf'\n plot_fig2_6(self.times, self.y, std, P, a, F, fig_name, self.var_name)\n return (a, std, P, v, F)\n<|end_body_1|>\n", "revision_id": "1b15b5f164c991226618003efd867b62c1eecd29", "skeleton": "<|skeleton|>\nclass KFpredictor:\n\n def __init__(self, df, init_pars, var='dep_var', var_name='Volume of Nile'):\n \"\"\"Initialisation, where df is a pandas DataFrame and var is the name of the column to study and init_pars is a dictionary with initial values\"\"\"\n <|body_0|>\n\n def iterate(self, plot=True):\n \"\"\"Iterate over the observations and update the filtered values after each iteration\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class KFpredictor:\n def __init__(self, df, init_pars, var='dep_var', var_name='Volume of Nile'):\n \"\"\"Initialisation, where df is a pandas DataFrame and var is the name of the column to study and init_pars is a dictionary with initial values\"\"\"\n self.df = df\n self.var = var\n self.var_name = var_name\n self.y = np.array(df[var].values.flatten())\n self.times = df.index\n self.pardict = init_pars\n\n def iterate(self, plot=True):\n \"\"\"Iterate over the observations and update the filtered values after each iteration\"\"\"\n F = np.zeros(len(self.y))\n a = np.zeros(len(self.y))\n v = np.zeros(len(self.y))\n P = np.zeros(len(self.y))\n P[0] = self.pardict['P1']\n a[0] = self.y[0]\n sigma_eps2 = self.pardict['sigma_eps2']\n sigma_eta2 = self.pardict['sigma_eta2']\n for t in range(0, len(self.y) - 1):\n F[t] = P[t] + sigma_eps2\n Kt = P[t] / F[t] if np.isfinite(self.y[t]) else 0\n v[t] = self.y[t] - a[t]\n a[t + 1] = a[t] + np.nan_to_num(Kt * v[t])\n F[t] = P[t] + sigma_eps2\n P[t + 1] = P[t] * (1 - Kt) + sigma_eta2\n F[-1] = P[-1] + sigma_eps2\n v[-1] = self.y[-1] - a[-1]\n std = np.sqrt(P * sigma_eps2 / (P + sigma_eps2))\n if plot:\n fig_name = self.var_name + 'Fig26.pdf'\n plot_fig2_6(self.times, self.y, std, P, a, F, fig_name, self.var_name)\n return (a, std, P, v, F)\n", "source": "the_stack_v2_python_sparse", "source_path": "Assignment1/kalman_prediction.py", "source_repo": "LuukOudshoorn25/Timeseries", "split": "test", "star_events_count": 0} {"blob_id": "d0dc059f717ff4cd9b1e0e706d4f6a8f211cd88e", "bodies": ["jobs = sorted(zip(startTime, endTime, profit), key=lambda v: v[1])\ndp = [[0, 0]]\nfor start, end, profit in jobs:\n i = bisect.bisect(dp, [start + 1]) - 1\n if dp[i][1] + profit > dp[-1][1]:\n dp.append([end, dp[i][1] + profit])\nreturn dp[-1][1]", "jobs = sorted(zip(startTime, endTime, profit), key=lambda x: x[0])\ntotal = 0\nminHeap = []\nfor start, end, current_profit in jobs:\n while minHeap and minHeap[0][0] <= start:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n heapq.heappush(minHeap, (end, total + current_profit))\nwhile minHeap:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\nreturn total"], "bodies_text": "<|body_start_0|>\n jobs = sorted(zip(startTime, endTime, profit), key=lambda v: v[1])\n dp = [[0, 0]]\n for start, end, profit in jobs:\n i = bisect.bisect(dp, [start + 1]) - 1\n if dp[i][1] + profit > dp[-1][1]:\n dp.append([end, dp[i][1] + profit])\n return dp[-1][1]\n<|end_body_0|>\n\n<|body_start_1|>\n jobs = sorted(zip(startTime, endTime, profit), key=lambda x: x[0])\n total = 0\n minHeap = []\n for start, end, current_profit in jobs:\n while minHeap and minHeap[0][0] <= start:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n heapq.heappush(minHeap, (end, total + current_profit))\n while minHeap:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n return total\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409009/JavaC%2B%2BPython-DP-Solution dynamic programming Time O(NlogN) for sorting Time O(NlogN) for binary search for each job Space O(N)\"\"\"\n <|body_0|>\n\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409358/Python using minHeap\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n jobs = sorted(zip(startTime, endTime, profit), key=lambda v: v[1])\n dp = [[0, 0]]\n for start, end, profit in jobs:\n i = bisect.bisect(dp, [start + 1]) - 1\n if dp[i][1] + profit > dp[-1][1]:\n dp.append([end, dp[i][1] + profit])\n return dp[-1][1]\n<|end_body_0|>\n\n<|body_start_1|>\n jobs = sorted(zip(startTime, endTime, profit), key=lambda x: x[0])\n total = 0\n minHeap = []\n for start, end, current_profit in jobs:\n while minHeap and minHeap[0][0] <= start:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n heapq.heappush(minHeap, (end, total + current_profit))\n while minHeap:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n return total\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000363", "length_bytes": 2115, "license_type": "no_license", "methods": [{"docstring": "https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409009/JavaC%2B%2BPython-DP-Solution dynamic programming Time O(NlogN) for sorting Time O(NlogN) for binary search for each job Space O(N)", "name": "jobScheduling", "signature": "def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int"}, {"docstring": "https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409358/Python using minHeap", "name": "jobScheduling", "signature": "def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_026071", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int: https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409009/JavaC%2B%2BPython-DP-Solution dynamic programming Time O(NlogN) for sorting Time O(NlogN) for binary search for each job Space O(N)\n- def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int: https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409358/Python using minHeap", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int: https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409009/JavaC%2B%2BPython-DP-Solution dynamic programming Time O(NlogN) for sorting Time O(NlogN) for binary search for each job Space O(N)\n- def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int: https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409358/Python using minHeap\n\n<|skeleton|>\nclass Solution:\n\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409009/JavaC%2B%2BPython-DP-Solution dynamic programming Time O(NlogN) for sorting Time O(NlogN) for binary search for each job Space O(N)\"\"\"\n <|body_0|>\n\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409358/Python using minHeap\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n jobs = sorted(zip(startTime, endTime, profit), key=lambda v: v[1])\n dp = [[0, 0]]\n for start, end, profit in jobs:\n i = bisect.bisect(dp, [start + 1]) - 1\n if dp[i][1] + profit > dp[-1][1]:\n dp.append([end, dp[i][1] + profit])\n return dp[-1][1]\n<|end_body_0|>\n\n<|body_start_1|>\n jobs = sorted(zip(startTime, endTime, profit), key=lambda x: x[0])\n total = 0\n minHeap = []\n for start, end, current_profit in jobs:\n while minHeap and minHeap[0][0] <= start:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n heapq.heappush(minHeap, (end, total + current_profit))\n while minHeap:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n return total\n<|end_body_1|>\n", "revision_id": "e50dc0642f087f37ab3234390be3d8a0ed48fe62", "skeleton": "<|skeleton|>\nclass Solution:\n\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409009/JavaC%2B%2BPython-DP-Solution dynamic programming Time O(NlogN) for sorting Time O(NlogN) for binary search for each job Space O(N)\"\"\"\n <|body_0|>\n\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409358/Python using minHeap\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409009/JavaC%2B%2BPython-DP-Solution dynamic programming Time O(NlogN) for sorting Time O(NlogN) for binary search for each job Space O(N)\"\"\"\n jobs = sorted(zip(startTime, endTime, profit), key=lambda v: v[1])\n dp = [[0, 0]]\n for start, end, profit in jobs:\n i = bisect.bisect(dp, [start + 1]) - 1\n if dp[i][1] + profit > dp[-1][1]:\n dp.append([end, dp[i][1] + profit])\n return dp[-1][1]\n\n def jobScheduling(self, startTime: list[int], endTime: list[int], profit: list[int]) -> int:\n \"\"\"https://leetcode.com/problems/maximum-profit-in-job-scheduling/discuss/409358/Python using minHeap\"\"\"\n jobs = sorted(zip(startTime, endTime, profit), key=lambda x: x[0])\n total = 0\n minHeap = []\n for start, end, current_profit in jobs:\n while minHeap and minHeap[0][0] <= start:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n heapq.heappush(minHeap, (end, total + current_profit))\n while minHeap:\n _, profit = heapq.heappop(minHeap)\n total = max(total, profit)\n return total\n", "source": "the_stack_v2_python_sparse", "source_path": "Leetcode/ByteDance/1235. Maximum Profit in Job Scheduling.py", "source_repo": "brlala/Educative-Grokking-Coding-Exercise", "split": "test", "star_events_count": 3} {"blob_id": "c4fa24972c826667efe40d5ed59065a024ad8f0a", "bodies": ["super(CampaignFeedback, self).__init__(*args, **kwargs)\nself.endpoint = 'campaigns'\nself.campaign_id = None\nself.feedback_id = None", "self.campaign_id = campaign_id\nif 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\nresponse = self._mc_client._post(url=self._build_path(campaign_id, 'feedback'), data=data, **queryparams)\nif response is not None:\n self.feedback_id = response['feedback_id']\nelse:\n self.feedback_id = None\nreturn response", "self.campaign_id = campaign_id\nself.feedback_id = None\nif get_all:\n return self._iterate(url=self._build_path(campaign_id, 'feedback'), **queryparams)\nelse:\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback'), **queryparams)", "self.campaign_id = campaign_id\nself.feedback_id = feedback_id\nreturn self._mc_client._get(url=self._build_path(campaign_id, 'feedback', feedback_id), **queryparams)", "self.campaign_id = campaign_id\nself.feedback_id = feedback_id\nif 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\nreturn self._mc_client._patch(url=self._build_path(campaign_id, 'feedback', feedback_id), data=data)", "self.campaign_id = campaign_id\nself.feedback_id = feedback_id\nreturn self._mc_client._delete(url=self._build_path(campaign_id, 'feedback', feedback_id))"], "bodies_text": "<|body_start_0|>\n super(CampaignFeedback, self).__init__(*args, **kwargs)\n self.endpoint = 'campaigns'\n self.campaign_id = None\n self.feedback_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.campaign_id = campaign_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n response = self._mc_client._post(url=self._build_path(campaign_id, 'feedback'), data=data, **queryparams)\n if response is not None:\n self.feedback_id = response['feedback_id']\n else:\n self.feedback_id = None\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n self.campaign_id = campaign_id\n self.feedback_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n<|end_body_2|>\n\n<|body_start_3|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback', feedback_id), **queryparams)\n<|end_body_3|>\n\n<|body_start_4|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n return self._mc_client._patch(url=self._build_path(campaign_id, 'feedback', feedback_id), data=data)\n<|end_body_4|>\n\n<|body_start_5|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._delete(url=self._build_path(campaign_id, 'feedback', feedback_id))\n<|end_body_5|>\n", "class_docstring": "Post comments, reply to team feedback, and send test emails while you’re working together on a MailChimp campaign.", "class_name": "CampaignFeedback", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CampaignFeedback:\n \"\"\"Post comments, reply to team feedback, and send test emails while you’re working together on a MailChimp campaign.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n <|body_0|>\n\n def create(self, campaign_id, data, **queryparams):\n \"\"\"Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_1|>\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get team feedback while you’re working together on a MailChimp campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_2|>\n\n def get(self, campaign_id, feedback_id, **queryparams):\n \"\"\"Get a specific feedback message from a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_3|>\n\n def update(self, campaign_id, feedback_id, data):\n \"\"\"Update a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* }\"\"\"\n <|body_4|>\n\n def delete(self, campaign_id, feedback_id):\n \"\"\"Remove a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str`\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CampaignFeedback, self).__init__(*args, **kwargs)\n self.endpoint = 'campaigns'\n self.campaign_id = None\n self.feedback_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.campaign_id = campaign_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n response = self._mc_client._post(url=self._build_path(campaign_id, 'feedback'), data=data, **queryparams)\n if response is not None:\n self.feedback_id = response['feedback_id']\n else:\n self.feedback_id = None\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n self.campaign_id = campaign_id\n self.feedback_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n<|end_body_2|>\n\n<|body_start_3|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback', feedback_id), **queryparams)\n<|end_body_3|>\n\n<|body_start_4|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n return self._mc_client._patch(url=self._build_path(campaign_id, 'feedback', feedback_id), data=data)\n<|end_body_4|>\n\n<|body_start_5|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._delete(url=self._build_path(campaign_id, 'feedback', feedback_id))\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000364", "length_bytes": 4497, "license_type": "permissive", "methods": [{"docstring": "Initialize the endpoint", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []", "name": "create", "signature": "def create(self, campaign_id, data, **queryparams)"}, {"docstring": "Get team feedback while you’re working together on a MailChimp campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []", "name": "all", "signature": "def all(self, campaign_id, get_all=False, **queryparams)"}, {"docstring": "Get a specific feedback message from a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []", "name": "get", "signature": "def get(self, campaign_id, feedback_id, **queryparams)"}, {"docstring": "Update a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* }", "name": "update", "signature": "def update(self, campaign_id, feedback_id, data)"}, {"docstring": "Remove a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str`", "name": "delete", "signature": "def delete(self, campaign_id, feedback_id)"}], "n_methods": 6, "prompt": "Implement the Python class `CampaignFeedback` described below.\n\nClass description:\nPost comments, reply to team feedback, and send test emails while you’re working together on a MailChimp campaign.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize the endpoint\n- def create(self, campaign_id, data, **queryparams): Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\n- def all(self, campaign_id, get_all=False, **queryparams): Get team feedback while you’re working together on a MailChimp campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\n- def get(self, campaign_id, feedback_id, **queryparams): Get a specific feedback message from a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\n- def update(self, campaign_id, feedback_id, data): Update a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* }\n- def delete(self, campaign_id, feedback_id): Remove a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str`", "prompted_full_text": "Implement the Python class `CampaignFeedback` described below.\n\nClass description:\nPost comments, reply to team feedback, and send test emails while you’re working together on a MailChimp campaign.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize the endpoint\n- def create(self, campaign_id, data, **queryparams): Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\n- def all(self, campaign_id, get_all=False, **queryparams): Get team feedback while you’re working together on a MailChimp campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\n- def get(self, campaign_id, feedback_id, **queryparams): Get a specific feedback message from a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\n- def update(self, campaign_id, feedback_id, data): Update a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* }\n- def delete(self, campaign_id, feedback_id): Remove a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str`\n\n<|skeleton|>\nclass CampaignFeedback:\n \"\"\"Post comments, reply to team feedback, and send test emails while you’re working together on a MailChimp campaign.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n <|body_0|>\n\n def create(self, campaign_id, data, **queryparams):\n \"\"\"Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_1|>\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get team feedback while you’re working together on a MailChimp campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_2|>\n\n def get(self, campaign_id, feedback_id, **queryparams):\n \"\"\"Get a specific feedback message from a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_3|>\n\n def update(self, campaign_id, feedback_id, data):\n \"\"\"Update a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* }\"\"\"\n <|body_4|>\n\n def delete(self, campaign_id, feedback_id):\n \"\"\"Remove a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str`\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CampaignFeedback, self).__init__(*args, **kwargs)\n self.endpoint = 'campaigns'\n self.campaign_id = None\n self.feedback_id = None\n<|end_body_0|>\n\n<|body_start_1|>\n self.campaign_id = campaign_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n response = self._mc_client._post(url=self._build_path(campaign_id, 'feedback'), data=data, **queryparams)\n if response is not None:\n self.feedback_id = response['feedback_id']\n else:\n self.feedback_id = None\n return response\n<|end_body_1|>\n\n<|body_start_2|>\n self.campaign_id = campaign_id\n self.feedback_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n<|end_body_2|>\n\n<|body_start_3|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback', feedback_id), **queryparams)\n<|end_body_3|>\n\n<|body_start_4|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n return self._mc_client._patch(url=self._build_path(campaign_id, 'feedback', feedback_id), data=data)\n<|end_body_4|>\n\n<|body_start_5|>\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._delete(url=self._build_path(campaign_id, 'feedback', feedback_id))\n<|end_body_5|>\n", "revision_id": "bf61cd602dc44cbff32fbf6f6dcdd33cf6f782e8", "skeleton": "<|skeleton|>\nclass CampaignFeedback:\n \"\"\"Post comments, reply to team feedback, and send test emails while you’re working together on a MailChimp campaign.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n <|body_0|>\n\n def create(self, campaign_id, data, **queryparams):\n \"\"\"Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_1|>\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get team feedback while you’re working together on a MailChimp campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_2|>\n\n def get(self, campaign_id, feedback_id, **queryparams):\n \"\"\"Get a specific feedback message from a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_3|>\n\n def update(self, campaign_id, feedback_id, data):\n \"\"\"Update a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* }\"\"\"\n <|body_4|>\n\n def delete(self, campaign_id, feedback_id):\n \"\"\"Remove a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str`\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CampaignFeedback:\n \"\"\"Post comments, reply to team feedback, and send test emails while you’re working together on a MailChimp campaign.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n super(CampaignFeedback, self).__init__(*args, **kwargs)\n self.endpoint = 'campaigns'\n self.campaign_id = None\n self.feedback_id = None\n\n def create(self, campaign_id, data, **queryparams):\n \"\"\"Add feedback on a specific campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* } :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n self.campaign_id = campaign_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n response = self._mc_client._post(url=self._build_path(campaign_id, 'feedback'), data=data, **queryparams)\n if response is not None:\n self.feedback_id = response['feedback_id']\n else:\n self.feedback_id = None\n return response\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get team feedback while you’re working together on a MailChimp campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n self.campaign_id = campaign_id\n self.feedback_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback'), **queryparams)\n\n def get(self, campaign_id, feedback_id, **queryparams):\n \"\"\"Get a specific feedback message from a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'feedback', feedback_id), **queryparams)\n\n def update(self, campaign_id, feedback_id, data):\n \"\"\"Update a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str` :param data: The request body parameters :type data: :py:class:`dict` data = { \"message\": string* }\"\"\"\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n if 'message' not in data:\n raise KeyError('The campaign feedback must have a message')\n return self._mc_client._patch(url=self._build_path(campaign_id, 'feedback', feedback_id), data=data)\n\n def delete(self, campaign_id, feedback_id):\n \"\"\"Remove a specific feedback message for a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param feedback_id: The unique id for the feedback message. :type feedback_id: :py:class:`str`\"\"\"\n self.campaign_id = campaign_id\n self.feedback_id = feedback_id\n return self._mc_client._delete(url=self._build_path(campaign_id, 'feedback', feedback_id))\n", "source": "the_stack_v2_python_sparse", "source_path": "mailchimp3/entities/campaignfeedback.py", "source_repo": "VingtCinq/python-mailchimp", "split": "test", "star_events_count": 190} {"blob_id": "b2caca50b861acd136c5211fc6d478e9b6671a05", "bodies": ["self.xmax = max(self.xmax, x)\nif node.left:\n xleft = x + 1 if node.left.val == node.val + 1 else 1\n self.backtrack(xleft, node.left)\nif node.right:\n xright = x + 1 if node.right.val == node.val + 1 else 1\n self.backtrack(xright, node.right)", "self.xmax = 0\nif root:\n self.backtrack(1, root)\nreturn self.xmax"], "bodies_text": "<|body_start_0|>\n self.xmax = max(self.xmax, x)\n if node.left:\n xleft = x + 1 if node.left.val == node.val + 1 else 1\n self.backtrack(xleft, node.left)\n if node.right:\n xright = x + 1 if node.right.val == node.val + 1 else 1\n self.backtrack(xright, node.right)\n<|end_body_0|>\n\n<|body_start_1|>\n self.xmax = 0\n if root:\n self.backtrack(1, root)\n return self.xmax\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def backtrack(self, x, node):\n \"\"\"x: length of consecutive path to this node.\"\"\"\n <|body_0|>\n\n def longestConsecutive(self, root: TreeNode) -> int:\n \"\"\"DFS\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.xmax = max(self.xmax, x)\n if node.left:\n xleft = x + 1 if node.left.val == node.val + 1 else 1\n self.backtrack(xleft, node.left)\n if node.right:\n xright = x + 1 if node.right.val == node.val + 1 else 1\n self.backtrack(xright, node.right)\n<|end_body_0|>\n\n<|body_start_1|>\n self.xmax = 0\n if root:\n self.backtrack(1, root)\n return self.xmax\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000365", "length_bytes": 1006, "license_type": "no_license", "methods": [{"docstring": "x: length of consecutive path to this node.", "name": "backtrack", "signature": "def backtrack(self, x, node)"}, {"docstring": "DFS", "name": "longestConsecutive", "signature": "def longestConsecutive(self, root: TreeNode) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_039117", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def backtrack(self, x, node): x: length of consecutive path to this node.\n- def longestConsecutive(self, root: TreeNode) -> int: DFS", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def backtrack(self, x, node): x: length of consecutive path to this node.\n- def longestConsecutive(self, root: TreeNode) -> int: DFS\n\n<|skeleton|>\nclass Solution:\n\n def backtrack(self, x, node):\n \"\"\"x: length of consecutive path to this node.\"\"\"\n <|body_0|>\n\n def longestConsecutive(self, root: TreeNode) -> int:\n \"\"\"DFS\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.xmax = max(self.xmax, x)\n if node.left:\n xleft = x + 1 if node.left.val == node.val + 1 else 1\n self.backtrack(xleft, node.left)\n if node.right:\n xright = x + 1 if node.right.val == node.val + 1 else 1\n self.backtrack(xright, node.right)\n<|end_body_0|>\n\n<|body_start_1|>\n self.xmax = 0\n if root:\n self.backtrack(1, root)\n return self.xmax\n<|end_body_1|>\n", "revision_id": "6043134736452a6f4704b62857d0aed2e9571164", "skeleton": "<|skeleton|>\nclass Solution:\n\n def backtrack(self, x, node):\n \"\"\"x: length of consecutive path to this node.\"\"\"\n <|body_0|>\n\n def longestConsecutive(self, root: TreeNode) -> int:\n \"\"\"DFS\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def backtrack(self, x, node):\n \"\"\"x: length of consecutive path to this node.\"\"\"\n self.xmax = max(self.xmax, x)\n if node.left:\n xleft = x + 1 if node.left.val == node.val + 1 else 1\n self.backtrack(xleft, node.left)\n if node.right:\n xright = x + 1 if node.right.val == node.val + 1 else 1\n self.backtrack(xright, node.right)\n\n def longestConsecutive(self, root: TreeNode) -> int:\n \"\"\"DFS\"\"\"\n self.xmax = 0\n if root:\n self.backtrack(1, root)\n return self.xmax\n", "source": "the_stack_v2_python_sparse", "source_path": "src/0200-0299/0298.longest.consecutive.path.bt.py", "source_repo": "gyang274/leetcode", "split": "test", "star_events_count": 1} {"blob_id": "a7e187abfd5943af19f545145d7b31c2ef09db95", "bodies": ["if not value:\n return None\nreturn ''.join([f'{int(i):02x}' for i in value.split(':')])", "if not value:\n return None\nvalue = value.lstrip('#')\nreturn ':'.join([str(int(value[i:i + 2], 16)) for i in range(0, len(value) - 1, 2)])"], "bodies_text": "<|body_start_0|>\n if not value:\n return None\n return ''.join([f'{int(i):02x}' for i in value.split(':')])\n<|end_body_0|>\n\n<|body_start_1|>\n if not value:\n return None\n value = value.lstrip('#')\n return ':'.join([str(int(value[i:i + 2], 16)) for i in range(0, len(value) - 1, 2)])\n<|end_body_1|>\n", "class_docstring": "Utility field class for color values.", "class_name": "ColorField", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ColorField:\n \"\"\"Utility field class for color values.\"\"\"\n\n def _serialize(self, value: str, *_, **__):\n \"\"\"Convert a hex native color value (``ff0000``) to the format exposed by the SwitchBot API (``255:0:0``).\"\"\"\n <|body_0|>\n\n def _deserialize(self, value: str, *_, **__):\n \"\"\"Convert a SwitchBot API color value (``255:0:0``) to the hex native format (``ff0000``).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not value:\n return None\n return ''.join([f'{int(i):02x}' for i in value.split(':')])\n<|end_body_0|>\n\n<|body_start_1|>\n if not value:\n return None\n value = value.lstrip('#')\n return ':'.join([str(int(value[i:i + 2], 16)) for i in range(0, len(value) - 1, 2)])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000366", "length_bytes": 8416, "license_type": "permissive", "methods": [{"docstring": "Convert a hex native color value (``ff0000``) to the format exposed by the SwitchBot API (``255:0:0``).", "name": "_serialize", "signature": "def _serialize(self, value: str, *_, **__)"}, {"docstring": "Convert a SwitchBot API color value (``255:0:0``) to the hex native format (``ff0000``).", "name": "_deserialize", "signature": "def _deserialize(self, value: str, *_, **__)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013947", "prompt": "Implement the Python class `ColorField` described below.\n\nClass description:\nUtility field class for color values.\n\nMethod signatures and docstrings:\n- def _serialize(self, value: str, *_, **__): Convert a hex native color value (``ff0000``) to the format exposed by the SwitchBot API (``255:0:0``).\n- def _deserialize(self, value: str, *_, **__): Convert a SwitchBot API color value (``255:0:0``) to the hex native format (``ff0000``).", "prompted_full_text": "Implement the Python class `ColorField` described below.\n\nClass description:\nUtility field class for color values.\n\nMethod signatures and docstrings:\n- def _serialize(self, value: str, *_, **__): Convert a hex native color value (``ff0000``) to the format exposed by the SwitchBot API (``255:0:0``).\n- def _deserialize(self, value: str, *_, **__): Convert a SwitchBot API color value (``255:0:0``) to the hex native format (``ff0000``).\n\n<|skeleton|>\nclass ColorField:\n \"\"\"Utility field class for color values.\"\"\"\n\n def _serialize(self, value: str, *_, **__):\n \"\"\"Convert a hex native color value (``ff0000``) to the format exposed by the SwitchBot API (``255:0:0``).\"\"\"\n <|body_0|>\n\n def _deserialize(self, value: str, *_, **__):\n \"\"\"Convert a SwitchBot API color value (``255:0:0``) to the hex native format (``ff0000``).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not value:\n return None\n return ''.join([f'{int(i):02x}' for i in value.split(':')])\n<|end_body_0|>\n\n<|body_start_1|>\n if not value:\n return None\n value = value.lstrip('#')\n return ':'.join([str(int(value[i:i + 2], 16)) for i in range(0, len(value) - 1, 2)])\n<|end_body_1|>\n", "revision_id": "446bc2f67493d3554c5422242ff91d5b5c76d78a", "skeleton": "<|skeleton|>\nclass ColorField:\n \"\"\"Utility field class for color values.\"\"\"\n\n def _serialize(self, value: str, *_, **__):\n \"\"\"Convert a hex native color value (``ff0000``) to the format exposed by the SwitchBot API (``255:0:0``).\"\"\"\n <|body_0|>\n\n def _deserialize(self, value: str, *_, **__):\n \"\"\"Convert a SwitchBot API color value (``255:0:0``) to the hex native format (``ff0000``).\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ColorField:\n \"\"\"Utility field class for color values.\"\"\"\n\n def _serialize(self, value: str, *_, **__):\n \"\"\"Convert a hex native color value (``ff0000``) to the format exposed by the SwitchBot API (``255:0:0``).\"\"\"\n if not value:\n return None\n return ''.join([f'{int(i):02x}' for i in value.split(':')])\n\n def _deserialize(self, value: str, *_, **__):\n \"\"\"Convert a SwitchBot API color value (``255:0:0``) to the hex native format (``ff0000``).\"\"\"\n if not value:\n return None\n value = value.lstrip('#')\n return ':'.join([str(int(value[i:i + 2], 16)) for i in range(0, len(value) - 1, 2)])\n", "source": "the_stack_v2_python_sparse", "source_path": "platypush/schemas/switchbot.py", "source_repo": "BlackLight/platypush", "split": "test", "star_events_count": 265} {"blob_id": "ef430778b6f7fc8d070b9e4a58108309317a30b6", "bodies": ["super(ReportClickDetailReports, self).__init__(*args, **kwargs)\nself.endpoint = 'reports'\nself.campaign_id = None\nself.link_id = None\nself.members = ReportClickDetailMembers(self)", "self.campaign_id = campaign_id\nself.link_id = None\nif get_all:\n return self._iterate(url=self._build_path(campaign_id, 'click-details'), **queryparams)\nelse:\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details'), **queryparams)", "self.campaign_id = campaign_id\nself.link_id = link_id\nreturn self._mc_client._get(url=self._build_path(campaign_id, 'click-details', link_id), **queryparams)"], "bodies_text": "<|body_start_0|>\n super(ReportClickDetailReports, self).__init__(*args, **kwargs)\n self.endpoint = 'reports'\n self.campaign_id = None\n self.link_id = None\n self.members = ReportClickDetailMembers(self)\n<|end_body_0|>\n\n<|body_start_1|>\n self.campaign_id = campaign_id\n self.link_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n<|end_body_1|>\n\n<|body_start_2|>\n self.campaign_id = campaign_id\n self.link_id = link_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details', link_id), **queryparams)\n<|end_body_2|>\n", "class_docstring": "Get detailed information about links clicked in campaigns.", "class_name": "ReportClickDetailReports", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReportClickDetailReports:\n \"\"\"Get detailed information about links clicked in campaigns.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n <|body_0|>\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get information about clicks on specific links in your MailChimp campaigns. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer\"\"\"\n <|body_1|>\n\n def get(self, campaign_id, link_id, **queryparams):\n \"\"\"Get click details for a specific link in a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param link_id: The id for the link. :type link_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ReportClickDetailReports, self).__init__(*args, **kwargs)\n self.endpoint = 'reports'\n self.campaign_id = None\n self.link_id = None\n self.members = ReportClickDetailMembers(self)\n<|end_body_0|>\n\n<|body_start_1|>\n self.campaign_id = campaign_id\n self.link_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n<|end_body_1|>\n\n<|body_start_2|>\n self.campaign_id = campaign_id\n self.link_id = link_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details', link_id), **queryparams)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000367", "length_bytes": 2395, "license_type": "permissive", "methods": [{"docstring": "Initialize the endpoint", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Get information about clicks on specific links in your MailChimp campaigns. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer", "name": "all", "signature": "def all(self, campaign_id, get_all=False, **queryparams)"}, {"docstring": "Get click details for a specific link in a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param link_id: The id for the link. :type link_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []", "name": "get", "signature": "def get(self, campaign_id, link_id, **queryparams)"}], "n_methods": 3, "prompt": "Implement the Python class `ReportClickDetailReports` described below.\n\nClass description:\nGet detailed information about links clicked in campaigns.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize the endpoint\n- def all(self, campaign_id, get_all=False, **queryparams): Get information about clicks on specific links in your MailChimp campaigns. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer\n- def get(self, campaign_id, link_id, **queryparams): Get click details for a specific link in a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param link_id: The id for the link. :type link_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []", "prompted_full_text": "Implement the Python class `ReportClickDetailReports` described below.\n\nClass description:\nGet detailed information about links clicked in campaigns.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize the endpoint\n- def all(self, campaign_id, get_all=False, **queryparams): Get information about clicks on specific links in your MailChimp campaigns. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer\n- def get(self, campaign_id, link_id, **queryparams): Get click details for a specific link in a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param link_id: The id for the link. :type link_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\n\n<|skeleton|>\nclass ReportClickDetailReports:\n \"\"\"Get detailed information about links clicked in campaigns.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n <|body_0|>\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get information about clicks on specific links in your MailChimp campaigns. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer\"\"\"\n <|body_1|>\n\n def get(self, campaign_id, link_id, **queryparams):\n \"\"\"Get click details for a specific link in a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param link_id: The id for the link. :type link_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ReportClickDetailReports, self).__init__(*args, **kwargs)\n self.endpoint = 'reports'\n self.campaign_id = None\n self.link_id = None\n self.members = ReportClickDetailMembers(self)\n<|end_body_0|>\n\n<|body_start_1|>\n self.campaign_id = campaign_id\n self.link_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n<|end_body_1|>\n\n<|body_start_2|>\n self.campaign_id = campaign_id\n self.link_id = link_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details', link_id), **queryparams)\n<|end_body_2|>\n", "revision_id": "bf61cd602dc44cbff32fbf6f6dcdd33cf6f782e8", "skeleton": "<|skeleton|>\nclass ReportClickDetailReports:\n \"\"\"Get detailed information about links clicked in campaigns.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n <|body_0|>\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get information about clicks on specific links in your MailChimp campaigns. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer\"\"\"\n <|body_1|>\n\n def get(self, campaign_id, link_id, **queryparams):\n \"\"\"Get click details for a specific link in a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param link_id: The id for the link. :type link_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ReportClickDetailReports:\n \"\"\"Get detailed information about links clicked in campaigns.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the endpoint\"\"\"\n super(ReportClickDetailReports, self).__init__(*args, **kwargs)\n self.endpoint = 'reports'\n self.campaign_id = None\n self.link_id = None\n self.members = ReportClickDetailMembers(self)\n\n def all(self, campaign_id, get_all=False, **queryparams):\n \"\"\"Get information about clicks on specific links in your MailChimp campaigns. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param get_all: Should the query get all results :type get_all: :py:class:`bool` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = [] queryparams['count'] = integer queryparams['offset'] = integer\"\"\"\n self.campaign_id = campaign_id\n self.link_id = None\n if get_all:\n return self._iterate(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n else:\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details'), **queryparams)\n\n def get(self, campaign_id, link_id, **queryparams):\n \"\"\"Get click details for a specific link in a campaign. :param campaign_id: The unique id for the campaign. :type campaign_id: :py:class:`str` :param link_id: The id for the link. :type link_id: :py:class:`str` :param queryparams: The query string parameters queryparams['fields'] = [] queryparams['exclude_fields'] = []\"\"\"\n self.campaign_id = campaign_id\n self.link_id = link_id\n return self._mc_client._get(url=self._build_path(campaign_id, 'click-details', link_id), **queryparams)\n", "source": "the_stack_v2_python_sparse", "source_path": "mailchimp3/entities/reportclickdetailreports.py", "source_repo": "VingtCinq/python-mailchimp", "split": "test", "star_events_count": 190} {"blob_id": "c8a92e3be05aaa1f4307a4ba750ad86a4a5b8dd5", "bodies": ["self.val2nodes = dict()\nself.nodes = list()\nself.node2index = dict()", "is_new = val not in self.val2nodes\nnode = LinkedListNode(val)\nself.nodes.append(node)\nself.node2index[node] = len(self.nodes) - 1\nif is_new:\n self.val2nodes[val] = node\n return True\nelse:\n existed_nodes = self.val2nodes[val]\n node.nxt = existed_nodes\n self.val2nodes[val] = node\n return False", "if not val in self.val2nodes:\n return False\nhead_old = self.val2nodes[val]\nself.val2nodes[val] = head_old.nxt\nif self.val2nodes[val] == None:\n self.val2nodes.pop(val)\nindex = self.node2index[head_old]\nlast_node = self.nodes[-1]\nself.nodes[index] = last_node\nself.nodes.pop(-1)\nself.node2index[last_node] = index\nreturn True", "index = random.randrange(len(self.nodes))\nnode = self.nodes[index]\nreturn node.val"], "bodies_text": "<|body_start_0|>\n self.val2nodes = dict()\n self.nodes = list()\n self.node2index = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n is_new = val not in self.val2nodes\n node = LinkedListNode(val)\n self.nodes.append(node)\n self.node2index[node] = len(self.nodes) - 1\n if is_new:\n self.val2nodes[val] = node\n return True\n else:\n existed_nodes = self.val2nodes[val]\n node.nxt = existed_nodes\n self.val2nodes[val] = node\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if not val in self.val2nodes:\n return False\n head_old = self.val2nodes[val]\n self.val2nodes[val] = head_old.nxt\n if self.val2nodes[val] == None:\n self.val2nodes.pop(val)\n index = self.node2index[head_old]\n last_node = self.nodes[-1]\n self.nodes[index] = last_node\n self.nodes.pop(-1)\n self.node2index[last_node] = index\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n index = random.randrange(len(self.nodes))\n node = self.nodes[index]\n return node.val\n<|end_body_3|>\n", "class_docstring": "", "class_name": "RandomizedCollection", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RandomizedCollection:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def insert(self, val):\n \"\"\"Inserts a value to the collection. Returns true if the collection did not already contain the specified element. :type val: int :rtype: bool\"\"\"\n <|body_1|>\n\n def remove(self, val):\n \"\"\"Removes a value from the collection. Returns true if the collection contained the specified element. :type val: int :rtype: bool\"\"\"\n <|body_2|>\n\n def getRandom(self):\n \"\"\"Get a random element from the collection. :rtype: int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.val2nodes = dict()\n self.nodes = list()\n self.node2index = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n is_new = val not in self.val2nodes\n node = LinkedListNode(val)\n self.nodes.append(node)\n self.node2index[node] = len(self.nodes) - 1\n if is_new:\n self.val2nodes[val] = node\n return True\n else:\n existed_nodes = self.val2nodes[val]\n node.nxt = existed_nodes\n self.val2nodes[val] = node\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if not val in self.val2nodes:\n return False\n head_old = self.val2nodes[val]\n self.val2nodes[val] = head_old.nxt\n if self.val2nodes[val] == None:\n self.val2nodes.pop(val)\n index = self.node2index[head_old]\n last_node = self.nodes[-1]\n self.nodes[index] = last_node\n self.nodes.pop(-1)\n self.node2index[last_node] = index\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n index = random.randrange(len(self.nodes))\n node = self.nodes[index]\n return node.val\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000368", "length_bytes": 2129, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Inserts a value to the collection. Returns true if the collection did not already contain the specified element. :type val: int :rtype: bool", "name": "insert", "signature": "def insert(self, val)"}, {"docstring": "Removes a value from the collection. Returns true if the collection contained the specified element. :type val: int :rtype: bool", "name": "remove", "signature": "def remove(self, val)"}, {"docstring": "Get a random element from the collection. :rtype: int", "name": "getRandom", "signature": "def getRandom(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_023416", "prompt": "Implement the Python class `RandomizedCollection` described below.\n\nClass description:\nImplement the RandomizedCollection class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def insert(self, val): Inserts a value to the collection. Returns true if the collection did not already contain the specified element. :type val: int :rtype: bool\n- def remove(self, val): Removes a value from the collection. Returns true if the collection contained the specified element. :type val: int :rtype: bool\n- def getRandom(self): Get a random element from the collection. :rtype: int", "prompted_full_text": "Implement the Python class `RandomizedCollection` described below.\n\nClass description:\nImplement the RandomizedCollection class.\n\nMethod signatures and docstrings:\n- def __init__(self): Initialize your data structure here.\n- def insert(self, val): Inserts a value to the collection. Returns true if the collection did not already contain the specified element. :type val: int :rtype: bool\n- def remove(self, val): Removes a value from the collection. Returns true if the collection contained the specified element. :type val: int :rtype: bool\n- def getRandom(self): Get a random element from the collection. :rtype: int\n\n<|skeleton|>\nclass RandomizedCollection:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def insert(self, val):\n \"\"\"Inserts a value to the collection. Returns true if the collection did not already contain the specified element. :type val: int :rtype: bool\"\"\"\n <|body_1|>\n\n def remove(self, val):\n \"\"\"Removes a value from the collection. Returns true if the collection contained the specified element. :type val: int :rtype: bool\"\"\"\n <|body_2|>\n\n def getRandom(self):\n \"\"\"Get a random element from the collection. :rtype: int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.val2nodes = dict()\n self.nodes = list()\n self.node2index = dict()\n<|end_body_0|>\n\n<|body_start_1|>\n is_new = val not in self.val2nodes\n node = LinkedListNode(val)\n self.nodes.append(node)\n self.node2index[node] = len(self.nodes) - 1\n if is_new:\n self.val2nodes[val] = node\n return True\n else:\n existed_nodes = self.val2nodes[val]\n node.nxt = existed_nodes\n self.val2nodes[val] = node\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if not val in self.val2nodes:\n return False\n head_old = self.val2nodes[val]\n self.val2nodes[val] = head_old.nxt\n if self.val2nodes[val] == None:\n self.val2nodes.pop(val)\n index = self.node2index[head_old]\n last_node = self.nodes[-1]\n self.nodes[index] = last_node\n self.nodes.pop(-1)\n self.node2index[last_node] = index\n return True\n<|end_body_2|>\n\n<|body_start_3|>\n index = random.randrange(len(self.nodes))\n node = self.nodes[index]\n return node.val\n<|end_body_3|>\n", "revision_id": "44f422b75aa296cbb42d968ff843969af7bfa18a", "skeleton": "<|skeleton|>\nclass RandomizedCollection:\n\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n <|body_0|>\n\n def insert(self, val):\n \"\"\"Inserts a value to the collection. Returns true if the collection did not already contain the specified element. :type val: int :rtype: bool\"\"\"\n <|body_1|>\n\n def remove(self, val):\n \"\"\"Removes a value from the collection. Returns true if the collection contained the specified element. :type val: int :rtype: bool\"\"\"\n <|body_2|>\n\n def getRandom(self):\n \"\"\"Get a random element from the collection. :rtype: int\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RandomizedCollection:\n def __init__(self):\n \"\"\"Initialize your data structure here.\"\"\"\n self.val2nodes = dict()\n self.nodes = list()\n self.node2index = dict()\n\n def insert(self, val):\n \"\"\"Inserts a value to the collection. Returns true if the collection did not already contain the specified element. :type val: int :rtype: bool\"\"\"\n is_new = val not in self.val2nodes\n node = LinkedListNode(val)\n self.nodes.append(node)\n self.node2index[node] = len(self.nodes) - 1\n if is_new:\n self.val2nodes[val] = node\n return True\n else:\n existed_nodes = self.val2nodes[val]\n node.nxt = existed_nodes\n self.val2nodes[val] = node\n return False\n\n def remove(self, val):\n \"\"\"Removes a value from the collection. Returns true if the collection contained the specified element. :type val: int :rtype: bool\"\"\"\n if not val in self.val2nodes:\n return False\n head_old = self.val2nodes[val]\n self.val2nodes[val] = head_old.nxt\n if self.val2nodes[val] == None:\n self.val2nodes.pop(val)\n index = self.node2index[head_old]\n last_node = self.nodes[-1]\n self.nodes[index] = last_node\n self.nodes.pop(-1)\n self.node2index[last_node] = index\n return True\n\n def getRandom(self):\n \"\"\"Get a random element from the collection. :rtype: int\"\"\"\n index = random.randrange(len(self.nodes))\n node = self.nodes[index]\n return node.val\n", "source": "the_stack_v2_python_sparse", "source_path": "Data Structure/Hash Map/954/954_Jiuzhang_Su.py", "source_repo": "liuhz0926/algorithm_practicing_progress", "split": "test", "star_events_count": 0} {"blob_id": "ebbfcc7db2cd49d69d6e85d243ca3a6f0276dd07", "bodies": ["self.c = c\nself.children = []\nself.done = set()\ntry:\n module = __import__(parameter)\nexcept Exception as err:\n g.es('Failed: %s' % (err,), color='red')\nelse:\n components = parameter.split('.')\n for comp in components[1:]:\n module = getattr(module, comp)\n self.children.extend(self.getDocsFor(module))", "children = []\nfor name in dir(object):\n item = getattr(object, name)\n if not name.startswith('_') and (not id(item) in self.done):\n self.done.add(id(item))\n if inspect.isclass(item):\n grandchildren = self.getDocsFor(item)\n else:\n grandchildren = []\n children.append(TreeNode(name, getattr(item, '__doc__', 'No documentation for %s' % name), grandchildren))\nreturn children"], "bodies_text": "<|body_start_0|>\n self.c = c\n self.children = []\n self.done = set()\n try:\n module = __import__(parameter)\n except Exception as err:\n g.es('Failed: %s' % (err,), color='red')\n else:\n components = parameter.split('.')\n for comp in components[1:]:\n module = getattr(module, comp)\n self.children.extend(self.getDocsFor(module))\n<|end_body_0|>\n\n<|body_start_1|>\n children = []\n for name in dir(object):\n item = getattr(object, name)\n if not name.startswith('_') and (not id(item) in self.done):\n self.done.add(id(item))\n if inspect.isclass(item):\n grandchildren = self.getDocsFor(item)\n else:\n grandchildren = []\n children.append(TreeNode(name, getattr(item, '__doc__', 'No documentation for %s' % name), grandchildren))\n return children\n<|end_body_1|>\n", "class_docstring": "Handler for documentation nodes", "class_name": "Doc", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Doc:\n \"\"\"Handler for documentation nodes\"\"\"\n\n def initFrom(self, c, parameter):\n \"\"\"Initialize the tree\"\"\"\n <|body_0|>\n\n def getDocsFor(self, object):\n \"\"\"Return a list of child nodes documenting the object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.c = c\n self.children = []\n self.done = set()\n try:\n module = __import__(parameter)\n except Exception as err:\n g.es('Failed: %s' % (err,), color='red')\n else:\n components = parameter.split('.')\n for comp in components[1:]:\n module = getattr(module, comp)\n self.children.extend(self.getDocsFor(module))\n<|end_body_0|>\n\n<|body_start_1|>\n children = []\n for name in dir(object):\n item = getattr(object, name)\n if not name.startswith('_') and (not id(item) in self.done):\n self.done.add(id(item))\n if inspect.isclass(item):\n grandchildren = self.getDocsFor(item)\n else:\n grandchildren = []\n children.append(TreeNode(name, getattr(item, '__doc__', 'No documentation for %s' % name), grandchildren))\n return children\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000369", "length_bytes": 2068, "license_type": "no_license", "methods": [{"docstring": "Initialize the tree", "name": "initFrom", "signature": "def initFrom(self, c, parameter)"}, {"docstring": "Return a list of child nodes documenting the object", "name": "getDocsFor", "signature": "def getDocsFor(self, object)"}], "n_methods": 2, "prompt": "Implement the Python class `Doc` described below.\n\nClass description:\nHandler for documentation nodes\n\nMethod signatures and docstrings:\n- def initFrom(self, c, parameter): Initialize the tree\n- def getDocsFor(self, object): Return a list of child nodes documenting the object", "prompted_full_text": "Implement the Python class `Doc` described below.\n\nClass description:\nHandler for documentation nodes\n\nMethod signatures and docstrings:\n- def initFrom(self, c, parameter): Initialize the tree\n- def getDocsFor(self, object): Return a list of child nodes documenting the object\n\n<|skeleton|>\nclass Doc:\n \"\"\"Handler for documentation nodes\"\"\"\n\n def initFrom(self, c, parameter):\n \"\"\"Initialize the tree\"\"\"\n <|body_0|>\n\n def getDocsFor(self, object):\n \"\"\"Return a list of child nodes documenting the object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.c = c\n self.children = []\n self.done = set()\n try:\n module = __import__(parameter)\n except Exception as err:\n g.es('Failed: %s' % (err,), color='red')\n else:\n components = parameter.split('.')\n for comp in components[1:]:\n module = getattr(module, comp)\n self.children.extend(self.getDocsFor(module))\n<|end_body_0|>\n\n<|body_start_1|>\n children = []\n for name in dir(object):\n item = getattr(object, name)\n if not name.startswith('_') and (not id(item) in self.done):\n self.done.add(id(item))\n if inspect.isclass(item):\n grandchildren = self.getDocsFor(item)\n else:\n grandchildren = []\n children.append(TreeNode(name, getattr(item, '__doc__', 'No documentation for %s' % name), grandchildren))\n return children\n<|end_body_1|>\n", "revision_id": "28c22721e1bc313c120a8a6c288893bc566a5c67", "skeleton": "<|skeleton|>\nclass Doc:\n \"\"\"Handler for documentation nodes\"\"\"\n\n def initFrom(self, c, parameter):\n \"\"\"Initialize the tree\"\"\"\n <|body_0|>\n\n def getDocsFor(self, object):\n \"\"\"Return a list of child nodes documenting the object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Doc:\n \"\"\"Handler for documentation nodes\"\"\"\n\n def initFrom(self, c, parameter):\n \"\"\"Initialize the tree\"\"\"\n self.c = c\n self.children = []\n self.done = set()\n try:\n module = __import__(parameter)\n except Exception as err:\n g.es('Failed: %s' % (err,), color='red')\n else:\n components = parameter.split('.')\n for comp in components[1:]:\n module = getattr(module, comp)\n self.children.extend(self.getDocsFor(module))\n\n def getDocsFor(self, object):\n \"\"\"Return a list of child nodes documenting the object\"\"\"\n children = []\n for name in dir(object):\n item = getattr(object, name)\n if not name.startswith('_') and (not id(item) in self.done):\n self.done.add(id(item))\n if inspect.isclass(item):\n grandchildren = self.getDocsFor(item)\n else:\n grandchildren = []\n children.append(TreeNode(name, getattr(item, '__doc__', 'No documentation for %s' % name), grandchildren))\n return children\n", "source": "the_stack_v2_python_sparse", "source_path": "Projects/Archived Tk code/trees/doc.py", "source_repo": "leo-editor/leo-editor-contrib", "split": "test", "star_events_count": 6} {"blob_id": "47d1aecfcce201f69ca7c7aa2232c3fc84c77e45", "bodies": ["n = len(s)\nif n < k:\n return 0\ni = 0\nres = 0\nwhile i <= n - k:\n temp = [0 for i in range(26)]\n mask = 0\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n if temp[t] < k:\n mask |= 1 << t\n else:\n mask &= ~(1 << t)\n if mask == 0:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\nreturn res", "n = len(s)\nif n < k:\n return 0\ni = 0\nres = 0\nwhile i <= n - k:\n temp = [0 for i in range(26)]\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n mask = min(filter(bool, temp))\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\nreturn res", "n = len(s)\nif n < k:\n return 0\ni = 0\nres = 0\nwhile i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n i = min(mpos)\n else:\n i = max_idx + 1\nreturn res", "n = len(s)\nif n < k:\n return 0\ni = 0\nres = 0\nwhile i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n if len(mpos) == 0:\n break\n i = max(min(mpos), i + 1)\n else:\n i = max_idx + 1\nreturn res", "if len(s) < k:\n return 0\ndict_s = {}\nfor i in s:\n if dict_s.get(i):\n dict_s[i] += 1\n else:\n dict_s[i] = 1\nmax_len = 0\nstart = 0\nf = 0\nfor i in range(len(s)):\n if dict_s[s[i]] < k:\n max_len = max(max_len, self.longestSubstring(s[start:i], k))\n start = i + 1\n f = 1\nif len(s) - start >= k and f == 0:\n max_len = max(max_len, len(s) - start)\nelse:\n max_len = max(max_len, self.longestSubstring(s[start:], k))\nreturn max_len", "dic = {}\nfor i in s:\n dic[i] = dic.get(i, 0) + 1\nfor j in dic:\n if dic[j] < k:\n ss = s.split(j)\n return max([self.longestSubstring(t, k) for t in ss])\nreturn len(s)"], "bodies_text": "<|body_start_0|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n mask = 0\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n if temp[t] < k:\n mask |= 1 << t\n else:\n mask &= ~(1 << t)\n if mask == 0:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n mask = min(filter(bool, temp))\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n i = min(mpos)\n else:\n i = max_idx + 1\n return res\n<|end_body_2|>\n\n<|body_start_3|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n if len(mpos) == 0:\n break\n i = max(min(mpos), i + 1)\n else:\n i = max_idx + 1\n return res\n<|end_body_3|>\n\n<|body_start_4|>\n if len(s) < k:\n return 0\n dict_s = {}\n for i in s:\n if dict_s.get(i):\n dict_s[i] += 1\n else:\n dict_s[i] = 1\n max_len = 0\n start = 0\n f = 0\n for i in range(len(s)):\n if dict_s[s[i]] < k:\n max_len = max(max_len, self.longestSubstring(s[start:i], k))\n start = i + 1\n f = 1\n if len(s) - start >= k and f == 0:\n max_len = max(max_len, len(s) - start)\n else:\n max_len = max(max_len, self.longestSubstring(s[start:], k))\n return max_len\n<|end_body_4|>\n\n<|body_start_5|>\n dic = {}\n for i in s:\n dic[i] = dic.get(i, 0) + 1\n for j in dic:\n if dic[j] < k:\n ss = s.split(j)\n return max([self.longestSubstring(t, k) for t in ss])\n return len(s)\n<|end_body_5|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def longestSubstring(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def longestSubstring02(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n def longestSubstring03(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_2|>\n\n def longestSubstring04(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_3|>\n\n def longestSubstring05(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_4|>\n\n def longestSubstring06(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n mask = 0\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n if temp[t] < k:\n mask |= 1 << t\n else:\n mask &= ~(1 << t)\n if mask == 0:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n mask = min(filter(bool, temp))\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n i = min(mpos)\n else:\n i = max_idx + 1\n return res\n<|end_body_2|>\n\n<|body_start_3|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n if len(mpos) == 0:\n break\n i = max(min(mpos), i + 1)\n else:\n i = max_idx + 1\n return res\n<|end_body_3|>\n\n<|body_start_4|>\n if len(s) < k:\n return 0\n dict_s = {}\n for i in s:\n if dict_s.get(i):\n dict_s[i] += 1\n else:\n dict_s[i] = 1\n max_len = 0\n start = 0\n f = 0\n for i in range(len(s)):\n if dict_s[s[i]] < k:\n max_len = max(max_len, self.longestSubstring(s[start:i], k))\n start = i + 1\n f = 1\n if len(s) - start >= k and f == 0:\n max_len = max(max_len, len(s) - start)\n else:\n max_len = max(max_len, self.longestSubstring(s[start:], k))\n return max_len\n<|end_body_4|>\n\n<|body_start_5|>\n dic = {}\n for i in s:\n dic[i] = dic.get(i, 0) + 1\n for j in dic:\n if dic[j] < k:\n ss = s.split(j)\n return max([self.longestSubstring(t, k) for t in ss])\n return len(s)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000370", "length_bytes": 15354, "license_type": "no_license", "methods": [{"docstring": ":type s: str :type k: int :rtype: int", "name": "longestSubstring", "signature": "def longestSubstring(self, s, k)"}, {"docstring": ":type s: str :type k: int :rtype: int", "name": "longestSubstring02", "signature": "def longestSubstring02(self, s, k)"}, {"docstring": ":type s: str :type k: int :rtype: int", "name": "longestSubstring03", "signature": "def longestSubstring03(self, s, k)"}, {"docstring": ":type s: str :type k: int :rtype: int", "name": "longestSubstring04", "signature": "def longestSubstring04(self, s, k)"}, {"docstring": ":type s: str :type k: int :rtype: int", "name": "longestSubstring05", "signature": "def longestSubstring05(self, s, k)"}, {"docstring": ":type s: str :type k: int :rtype: int", "name": "longestSubstring06", "signature": "def longestSubstring06(self, s, k)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_train_034522", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestSubstring(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring02(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring03(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring04(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring05(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring06(self, s, k): :type s: str :type k: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def longestSubstring(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring02(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring03(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring04(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring05(self, s, k): :type s: str :type k: int :rtype: int\n- def longestSubstring06(self, s, k): :type s: str :type k: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def longestSubstring(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def longestSubstring02(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n def longestSubstring03(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_2|>\n\n def longestSubstring04(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_3|>\n\n def longestSubstring05(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_4|>\n\n def longestSubstring06(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n mask = 0\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n if temp[t] < k:\n mask |= 1 << t\n else:\n mask &= ~(1 << t)\n if mask == 0:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n mask = min(filter(bool, temp))\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n<|end_body_1|>\n\n<|body_start_2|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n i = min(mpos)\n else:\n i = max_idx + 1\n return res\n<|end_body_2|>\n\n<|body_start_3|>\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n if len(mpos) == 0:\n break\n i = max(min(mpos), i + 1)\n else:\n i = max_idx + 1\n return res\n<|end_body_3|>\n\n<|body_start_4|>\n if len(s) < k:\n return 0\n dict_s = {}\n for i in s:\n if dict_s.get(i):\n dict_s[i] += 1\n else:\n dict_s[i] = 1\n max_len = 0\n start = 0\n f = 0\n for i in range(len(s)):\n if dict_s[s[i]] < k:\n max_len = max(max_len, self.longestSubstring(s[start:i], k))\n start = i + 1\n f = 1\n if len(s) - start >= k and f == 0:\n max_len = max(max_len, len(s) - start)\n else:\n max_len = max(max_len, self.longestSubstring(s[start:], k))\n return max_len\n<|end_body_4|>\n\n<|body_start_5|>\n dic = {}\n for i in s:\n dic[i] = dic.get(i, 0) + 1\n for j in dic:\n if dic[j] < k:\n ss = s.split(j)\n return max([self.longestSubstring(t, k) for t in ss])\n return len(s)\n<|end_body_5|>\n", "revision_id": "b68b6fb256cae18f18517e2ec3060019c1e2cd3f", "skeleton": "<|skeleton|>\nclass Solution:\n\n def longestSubstring(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_0|>\n\n def longestSubstring02(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_1|>\n\n def longestSubstring03(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_2|>\n\n def longestSubstring04(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_3|>\n\n def longestSubstring05(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_4|>\n\n def longestSubstring06(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def longestSubstring(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n mask = 0\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n if temp[t] < k:\n mask |= 1 << t\n else:\n mask &= ~(1 << t)\n if mask == 0:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n\n def longestSubstring02(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = [0 for i in range(26)]\n max_idx = i\n for j in range(i, n):\n t = ord(s[j]) - ord('a')\n temp[t] += 1\n mask = min(filter(bool, temp))\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n i = max_idx + 1\n return res\n\n def longestSubstring03(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n i = min(mpos)\n else:\n i = max_idx + 1\n return res\n\n def longestSubstring04(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n n = len(s)\n if n < k:\n return 0\n i = 0\n res = 0\n while i <= n - k:\n temp = {}\n max_idx = i\n pos = {}\n skip = True\n for j in range(i, n):\n if s[j] in temp:\n temp[s[j]] = temp.get(s[j]) + 1\n else:\n temp[s[j]] = 1\n pos[s[j]] = j\n mask = min(temp.values())\n if mask >= k:\n res = max(res, j - i + 1)\n max_idx = j\n skip = False\n if skip:\n mpos = [pos[item[0]] for item in temp.items() if item[1] >= k]\n if len(mpos) == 0:\n break\n i = max(min(mpos), i + 1)\n else:\n i = max_idx + 1\n return res\n\n def longestSubstring05(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n if len(s) < k:\n return 0\n dict_s = {}\n for i in s:\n if dict_s.get(i):\n dict_s[i] += 1\n else:\n dict_s[i] = 1\n max_len = 0\n start = 0\n f = 0\n for i in range(len(s)):\n if dict_s[s[i]] < k:\n max_len = max(max_len, self.longestSubstring(s[start:i], k))\n start = i + 1\n f = 1\n if len(s) - start >= k and f == 0:\n max_len = max(max_len, len(s) - start)\n else:\n max_len = max(max_len, self.longestSubstring(s[start:], k))\n return max_len\n\n def longestSubstring06(self, s, k):\n \"\"\":type s: str :type k: int :rtype: int\"\"\"\n dic = {}\n for i in s:\n dic[i] = dic.get(i, 0) + 1\n for j in dic:\n if dic[j] < k:\n ss = s.split(j)\n return max([self.longestSubstring(t, k) for t in ss])\n return len(s)\n", "source": "the_stack_v2_python_sparse", "source_path": "longestSubstring.py", "source_repo": "vinlinch/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "9534f33286a7ed8895c05302321c87eecf6f8be0", "bodies": ["if self.session.loggedIn:\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert(\"It looks like you're already signed in!\", 'Hey there!', 'info')\nelse:\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n return loginForm", "passwd = self.env['members']['password']\nname = self.env['members']['username']\ntry:\n um.userORM.login(name, passwd, self.env['cookie'])\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert('Welcome back, %s!' % name, 'Ohia!', 'success')\nexcept Exception as exc:\n self.session.pushAlert('%s
Please try again.' % exc, 'Uh oh...', 'error')\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n if type(exc) == use.usernameError:\n loginForm.usernameError = True\n elif type(exc) == use.passwordError:\n loginForm.passwordError = True\n loginForm.username = name\n elif type(exc) == use.banError:\n loginForm.banError = True\n return loginForm"], "bodies_text": "<|body_start_0|>\n if self.session.loggedIn:\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert(\"It looks like you're already signed in!\", 'Hey there!', 'info')\n else:\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n return loginForm\n<|end_body_0|>\n\n<|body_start_1|>\n passwd = self.env['members']['password']\n name = self.env['members']['username']\n try:\n um.userORM.login(name, passwd, self.env['cookie'])\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert('Welcome back, %s!' % name, 'Ohia!', 'success')\n except Exception as exc:\n self.session.pushAlert('%s
Please try again.' % exc, 'Uh oh...', 'error')\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n if type(exc) == use.usernameError:\n loginForm.usernameError = True\n elif type(exc) == use.passwordError:\n loginForm.passwordError = True\n loginForm.username = name\n elif type(exc) == use.banError:\n loginForm.banError = True\n return loginForm\n<|end_body_1|>\n", "class_docstring": "", "class_name": "authLogin", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass authLogin:\n\n def GET(self):\n \"\"\"Display the login page or redirect to their dashboard if they are already logged in\"\"\"\n <|body_0|>\n\n def POST(self):\n \"\"\"Use form data to check login, and the redirect if successful if not successful then redirect to the login page again.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.session.loggedIn:\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert(\"It looks like you're already signed in!\", 'Hey there!', 'info')\n else:\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n return loginForm\n<|end_body_0|>\n\n<|body_start_1|>\n passwd = self.env['members']['password']\n name = self.env['members']['username']\n try:\n um.userORM.login(name, passwd, self.env['cookie'])\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert('Welcome back, %s!' % name, 'Ohia!', 'success')\n except Exception as exc:\n self.session.pushAlert('%s
Please try again.' % exc, 'Uh oh...', 'error')\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n if type(exc) == use.usernameError:\n loginForm.usernameError = True\n elif type(exc) == use.passwordError:\n loginForm.passwordError = True\n loginForm.username = name\n elif type(exc) == use.banError:\n loginForm.banError = True\n return loginForm\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000371", "length_bytes": 2033, "license_type": "permissive", "methods": [{"docstring": "Display the login page or redirect to their dashboard if they are already logged in", "name": "GET", "signature": "def GET(self)"}, {"docstring": "Use form data to check login, and the redirect if successful if not successful then redirect to the login page again.", "name": "POST", "signature": "def POST(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_000425", "prompt": "Implement the Python class `authLogin` described below.\n\nClass description:\nImplement the authLogin class.\n\nMethod signatures and docstrings:\n- def GET(self): Display the login page or redirect to their dashboard if they are already logged in\n- def POST(self): Use form data to check login, and the redirect if successful if not successful then redirect to the login page again.", "prompted_full_text": "Implement the Python class `authLogin` described below.\n\nClass description:\nImplement the authLogin class.\n\nMethod signatures and docstrings:\n- def GET(self): Display the login page or redirect to their dashboard if they are already logged in\n- def POST(self): Use form data to check login, and the redirect if successful if not successful then redirect to the login page again.\n\n<|skeleton|>\nclass authLogin:\n\n def GET(self):\n \"\"\"Display the login page or redirect to their dashboard if they are already logged in\"\"\"\n <|body_0|>\n\n def POST(self):\n \"\"\"Use form data to check login, and the redirect if successful if not successful then redirect to the login page again.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.session.loggedIn:\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert(\"It looks like you're already signed in!\", 'Hey there!', 'info')\n else:\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n return loginForm\n<|end_body_0|>\n\n<|body_start_1|>\n passwd = self.env['members']['password']\n name = self.env['members']['username']\n try:\n um.userORM.login(name, passwd, self.env['cookie'])\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert('Welcome back, %s!' % name, 'Ohia!', 'success')\n except Exception as exc:\n self.session.pushAlert('%s
Please try again.' % exc, 'Uh oh...', 'error')\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n if type(exc) == use.usernameError:\n loginForm.usernameError = True\n elif type(exc) == use.passwordError:\n loginForm.passwordError = True\n loginForm.username = name\n elif type(exc) == use.banError:\n loginForm.banError = True\n return loginForm\n<|end_body_1|>\n", "revision_id": "fbf9d0c5278a23f18b586dee89bb054794b3d452", "skeleton": "<|skeleton|>\nclass authLogin:\n\n def GET(self):\n \"\"\"Display the login page or redirect to their dashboard if they are already logged in\"\"\"\n <|body_0|>\n\n def POST(self):\n \"\"\"Use form data to check login, and the redirect if successful if not successful then redirect to the login page again.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class authLogin:\n def GET(self):\n \"\"\"Display the login page or redirect to their dashboard if they are already logged in\"\"\"\n if self.session.loggedIn:\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert(\"It looks like you're already signed in!\", 'Hey there!', 'info')\n else:\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n return loginForm\n\n def POST(self):\n \"\"\"Use form data to check login, and the redirect if successful if not successful then redirect to the login page again.\"\"\"\n passwd = self.env['members']['password']\n name = self.env['members']['username']\n try:\n um.userORM.login(name, passwd, self.env['cookie'])\n self.head = ('303 SEE OTHER', [('location', '/you')])\n self.session.pushAlert('Welcome back, %s!' % name, 'Ohia!', 'success')\n except Exception as exc:\n self.session.pushAlert('%s
Please try again.' % exc, 'Uh oh...', 'error')\n loginForm = authLoginTmpl(searchList=[self.tmplSearchList])\n if type(exc) == use.usernameError:\n loginForm.usernameError = True\n elif type(exc) == use.passwordError:\n loginForm.passwordError = True\n loginForm.username = name\n elif type(exc) == use.banError:\n loginForm.banError = True\n return loginForm\n", "source": "the_stack_v2_python_sparse", "source_path": "flagr_core/controllers/auth/authLoginController.py", "source_repo": "JoshAshby/Fla.gr", "split": "test", "star_events_count": 0} {"blob_id": "e087cc6058e2ac40e119c8d26af2b24fa9c96307", "bodies": ["cloud_id = request_args.get('id')\ncloud_name = request_args.get('name')\ndetails = request_args.get('details')\nif not (cloud_id or cloud_name):\n clouds_obj, link_params = dbapi.clouds_get_all(context, request_args, pagination_params)\n if details:\n clouds_obj = [utils.get_resource_with_vars(request_args, c) for c in clouds_obj]\nelse:\n if cloud_name:\n cloud_obj = dbapi.clouds_get_by_name(context, cloud_name)\n cloud_obj.data = cloud_obj.variables\n if cloud_id:\n cloud_obj = dbapi.clouds_get_by_id(context, cloud_id)\n cloud_obj.data = cloud_obj.variables\n clouds_obj = [cloud_obj]\n link_params = {}\nlinks = base.links_from(link_params)\nresponse_body = {'clouds': clouds_obj, 'links': links}\nreturn (jsonutils.to_primitive(response_body), 200, None)", "json = util.copy_project_id_into_json(context, request_data)\ncloud_obj = dbapi.clouds_create(context, json)\ncloud = jsonutils.to_primitive(cloud_obj)\nif 'variables' in json:\n cloud['variables'] = jsonutils.to_primitive(cloud_obj.variables)\nelse:\n cloud['variables'] = {}\nlocation = v1.api.url_for(CloudsById, id=cloud_obj.id, _external=True)\nheaders = {'Location': location}\nreturn (cloud, 201, headers)"], "bodies_text": "<|body_start_0|>\n cloud_id = request_args.get('id')\n cloud_name = request_args.get('name')\n details = request_args.get('details')\n if not (cloud_id or cloud_name):\n clouds_obj, link_params = dbapi.clouds_get_all(context, request_args, pagination_params)\n if details:\n clouds_obj = [utils.get_resource_with_vars(request_args, c) for c in clouds_obj]\n else:\n if cloud_name:\n cloud_obj = dbapi.clouds_get_by_name(context, cloud_name)\n cloud_obj.data = cloud_obj.variables\n if cloud_id:\n cloud_obj = dbapi.clouds_get_by_id(context, cloud_id)\n cloud_obj.data = cloud_obj.variables\n clouds_obj = [cloud_obj]\n link_params = {}\n links = base.links_from(link_params)\n response_body = {'clouds': clouds_obj, 'links': links}\n return (jsonutils.to_primitive(response_body), 200, None)\n<|end_body_0|>\n\n<|body_start_1|>\n json = util.copy_project_id_into_json(context, request_data)\n cloud_obj = dbapi.clouds_create(context, json)\n cloud = jsonutils.to_primitive(cloud_obj)\n if 'variables' in json:\n cloud['variables'] = jsonutils.to_primitive(cloud_obj.variables)\n else:\n cloud['variables'] = {}\n location = v1.api.url_for(CloudsById, id=cloud_obj.id, _external=True)\n headers = {'Location': location}\n return (cloud, 201, headers)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Clouds", "detected_licenses": ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Clouds:\n\n def get(self, context, request_args, pagination_params):\n \"\"\"Get cloud(s) for the project. Get cloud details if for a particular cloud.\"\"\"\n <|body_0|>\n\n def post(self, context, request_data):\n \"\"\"Create a new cloud.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cloud_id = request_args.get('id')\n cloud_name = request_args.get('name')\n details = request_args.get('details')\n if not (cloud_id or cloud_name):\n clouds_obj, link_params = dbapi.clouds_get_all(context, request_args, pagination_params)\n if details:\n clouds_obj = [utils.get_resource_with_vars(request_args, c) for c in clouds_obj]\n else:\n if cloud_name:\n cloud_obj = dbapi.clouds_get_by_name(context, cloud_name)\n cloud_obj.data = cloud_obj.variables\n if cloud_id:\n cloud_obj = dbapi.clouds_get_by_id(context, cloud_id)\n cloud_obj.data = cloud_obj.variables\n clouds_obj = [cloud_obj]\n link_params = {}\n links = base.links_from(link_params)\n response_body = {'clouds': clouds_obj, 'links': links}\n return (jsonutils.to_primitive(response_body), 200, None)\n<|end_body_0|>\n\n<|body_start_1|>\n json = util.copy_project_id_into_json(context, request_data)\n cloud_obj = dbapi.clouds_create(context, json)\n cloud = jsonutils.to_primitive(cloud_obj)\n if 'variables' in json:\n cloud['variables'] = jsonutils.to_primitive(cloud_obj.variables)\n else:\n cloud['variables'] = {}\n location = v1.api.url_for(CloudsById, id=cloud_obj.id, _external=True)\n headers = {'Location': location}\n return (cloud, 201, headers)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000372", "length_bytes": 2767, "license_type": "permissive", "methods": [{"docstring": "Get cloud(s) for the project. Get cloud details if for a particular cloud.", "name": "get", "signature": "def get(self, context, request_args, pagination_params)"}, {"docstring": "Create a new cloud.", "name": "post", "signature": "def post(self, context, request_data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001664", "prompt": "Implement the Python class `Clouds` described below.\n\nClass description:\nImplement the Clouds class.\n\nMethod signatures and docstrings:\n- def get(self, context, request_args, pagination_params): Get cloud(s) for the project. Get cloud details if for a particular cloud.\n- def post(self, context, request_data): Create a new cloud.", "prompted_full_text": "Implement the Python class `Clouds` described below.\n\nClass description:\nImplement the Clouds class.\n\nMethod signatures and docstrings:\n- def get(self, context, request_args, pagination_params): Get cloud(s) for the project. Get cloud details if for a particular cloud.\n- def post(self, context, request_data): Create a new cloud.\n\n<|skeleton|>\nclass Clouds:\n\n def get(self, context, request_args, pagination_params):\n \"\"\"Get cloud(s) for the project. Get cloud details if for a particular cloud.\"\"\"\n <|body_0|>\n\n def post(self, context, request_data):\n \"\"\"Create a new cloud.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n cloud_id = request_args.get('id')\n cloud_name = request_args.get('name')\n details = request_args.get('details')\n if not (cloud_id or cloud_name):\n clouds_obj, link_params = dbapi.clouds_get_all(context, request_args, pagination_params)\n if details:\n clouds_obj = [utils.get_resource_with_vars(request_args, c) for c in clouds_obj]\n else:\n if cloud_name:\n cloud_obj = dbapi.clouds_get_by_name(context, cloud_name)\n cloud_obj.data = cloud_obj.variables\n if cloud_id:\n cloud_obj = dbapi.clouds_get_by_id(context, cloud_id)\n cloud_obj.data = cloud_obj.variables\n clouds_obj = [cloud_obj]\n link_params = {}\n links = base.links_from(link_params)\n response_body = {'clouds': clouds_obj, 'links': links}\n return (jsonutils.to_primitive(response_body), 200, None)\n<|end_body_0|>\n\n<|body_start_1|>\n json = util.copy_project_id_into_json(context, request_data)\n cloud_obj = dbapi.clouds_create(context, json)\n cloud = jsonutils.to_primitive(cloud_obj)\n if 'variables' in json:\n cloud['variables'] = jsonutils.to_primitive(cloud_obj.variables)\n else:\n cloud['variables'] = {}\n location = v1.api.url_for(CloudsById, id=cloud_obj.id, _external=True)\n headers = {'Location': location}\n return (cloud, 201, headers)\n<|end_body_1|>\n", "revision_id": "555d0c885692200b6ab3dbb3adf780784e389183", "skeleton": "<|skeleton|>\nclass Clouds:\n\n def get(self, context, request_args, pagination_params):\n \"\"\"Get cloud(s) for the project. Get cloud details if for a particular cloud.\"\"\"\n <|body_0|>\n\n def post(self, context, request_data):\n \"\"\"Create a new cloud.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Clouds:\n def get(self, context, request_args, pagination_params):\n \"\"\"Get cloud(s) for the project. Get cloud details if for a particular cloud.\"\"\"\n cloud_id = request_args.get('id')\n cloud_name = request_args.get('name')\n details = request_args.get('details')\n if not (cloud_id or cloud_name):\n clouds_obj, link_params = dbapi.clouds_get_all(context, request_args, pagination_params)\n if details:\n clouds_obj = [utils.get_resource_with_vars(request_args, c) for c in clouds_obj]\n else:\n if cloud_name:\n cloud_obj = dbapi.clouds_get_by_name(context, cloud_name)\n cloud_obj.data = cloud_obj.variables\n if cloud_id:\n cloud_obj = dbapi.clouds_get_by_id(context, cloud_id)\n cloud_obj.data = cloud_obj.variables\n clouds_obj = [cloud_obj]\n link_params = {}\n links = base.links_from(link_params)\n response_body = {'clouds': clouds_obj, 'links': links}\n return (jsonutils.to_primitive(response_body), 200, None)\n\n def post(self, context, request_data):\n \"\"\"Create a new cloud.\"\"\"\n json = util.copy_project_id_into_json(context, request_data)\n cloud_obj = dbapi.clouds_create(context, json)\n cloud = jsonutils.to_primitive(cloud_obj)\n if 'variables' in json:\n cloud['variables'] = jsonutils.to_primitive(cloud_obj.variables)\n else:\n cloud['variables'] = {}\n location = v1.api.url_for(CloudsById, id=cloud_obj.id, _external=True)\n headers = {'Location': location}\n return (cloud, 201, headers)\n", "source": "the_stack_v2_python_sparse", "source_path": "craton/api/v1/resources/inventory/clouds.py", "source_repo": "mark4h/craton", "split": "test", "star_events_count": 0} {"blob_id": "26ede986262d119679cd0e47baa6dbfbb108f966", "bodies": ["super().__init__()\nextension = os.path.splitext(filename)[1]\nif extension != '.ntf' and extension != '.tif':\n raise RuntimeError('{} is not a NITF or TIFF file'.format(filename))\nself.extension = extension\nif not xml_filename:\n xml_filename = filename.replace(self.extension, '.xml')\nif not os.path.isfile(xml_filename):\n raise RuntimeError('{} does not exist'.format(xml_filename))\nself.xml_filename = xml_filename\ntree = ET.parse(self.xml_filename)\nself.imd_tag = tree.getroot().find('IMD')\nif self.imd_tag is None:\n raise RuntimeError('Unable to locate the \"IMD\" tag in {}'.format(xml_filename))\ntry:\n self.bandNameList = [n.tag for n in self.imd_tag if n.tag.startswith('BAND_')]\nexcept ValueError:\n self.bandNameList = None\nself.footprintsGml = None\nself.mean_sunaz = self.get_xml_tag(xml_tag='MEANSUNAZ')\nself.mean_sunel = self.get_xml_tag(xml_tag='MEANSUNEL')\nself.mean_sataz = self.get_xml_tag(xml_tag='MEANSATAZ')\nself.mean_satel = self.get_xml_tag(xml_tag='MEANSATEL')\nself.mean_intrack_viewangle = self.get_xml_tag(xml_tag='MEANINTRACKVIEWANGLE')\nself.mean_crosstrack_viewangle = self.get_xml_tag(xml_tag='MEANCROSSTRACKVIEWANGLE')\nself.mean_offnadir_viewangle = self.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE')", "value = self.imd_tag.find('IMAGE').find(xml_tag)\nif value is not None:\n return float(value.text)\nelse:\n warnings.warn('Unable to locate {}, return None.'.format(xml_tag))\n return None"], "bodies_text": "<|body_start_0|>\n super().__init__()\n extension = os.path.splitext(filename)[1]\n if extension != '.ntf' and extension != '.tif':\n raise RuntimeError('{} is not a NITF or TIFF file'.format(filename))\n self.extension = extension\n if not xml_filename:\n xml_filename = filename.replace(self.extension, '.xml')\n if not os.path.isfile(xml_filename):\n raise RuntimeError('{} does not exist'.format(xml_filename))\n self.xml_filename = xml_filename\n tree = ET.parse(self.xml_filename)\n self.imd_tag = tree.getroot().find('IMD')\n if self.imd_tag is None:\n raise RuntimeError('Unable to locate the \"IMD\" tag in {}'.format(xml_filename))\n try:\n self.bandNameList = [n.tag for n in self.imd_tag if n.tag.startswith('BAND_')]\n except ValueError:\n self.bandNameList = None\n self.footprintsGml = None\n self.mean_sunaz = self.get_xml_tag(xml_tag='MEANSUNAZ')\n self.mean_sunel = self.get_xml_tag(xml_tag='MEANSUNEL')\n self.mean_sataz = self.get_xml_tag(xml_tag='MEANSATAZ')\n self.mean_satel = self.get_xml_tag(xml_tag='MEANSATEL')\n self.mean_intrack_viewangle = self.get_xml_tag(xml_tag='MEANINTRACKVIEWANGLE')\n self.mean_crosstrack_viewangle = self.get_xml_tag(xml_tag='MEANCROSSTRACKVIEWANGLE')\n self.mean_offnadir_viewangle = self.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE')\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.imd_tag.find('IMAGE').find(xml_tag)\n if value is not None:\n return float(value.text)\n else:\n warnings.warn('Unable to locate {}, return None.'.format(xml_tag))\n return None\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DGFile", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DGFile:\n\n def __init__(self, filename, xml_filename=None, logger=None):\n \"\"\"Default DGFile initializer ---------- Parameters ---------- :param filename: string refering to NITF or TIF filename :param filename: string refering to XML filename :param logger: log file ---------- Attributes ---------- self.extension: string for raster filename extension (.nitf or .tif) self.xml_filename: string with XML filename self.imd_tag: XML object with IMD tag from XML file self.bandNameList: list of band names self.footprintsGml: footprints from XML file self.mean_sunaz: mean sun azimuth angle self.mean_sunel: mean sun elevation angle self.mean_sataz: mean satellite azimuth angle self.mean_satel: mean satellite elevation angle self.mean_intrack_viewangle: mean in track view angle\"\"\"\n <|body_0|>\n\n def get_xml_tag(self, xml_tag=None):\n \"\"\":param xml_tag: string refering to XML tag :return: float value from XML tag ---------- Example raster.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE') ----------\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n extension = os.path.splitext(filename)[1]\n if extension != '.ntf' and extension != '.tif':\n raise RuntimeError('{} is not a NITF or TIFF file'.format(filename))\n self.extension = extension\n if not xml_filename:\n xml_filename = filename.replace(self.extension, '.xml')\n if not os.path.isfile(xml_filename):\n raise RuntimeError('{} does not exist'.format(xml_filename))\n self.xml_filename = xml_filename\n tree = ET.parse(self.xml_filename)\n self.imd_tag = tree.getroot().find('IMD')\n if self.imd_tag is None:\n raise RuntimeError('Unable to locate the \"IMD\" tag in {}'.format(xml_filename))\n try:\n self.bandNameList = [n.tag for n in self.imd_tag if n.tag.startswith('BAND_')]\n except ValueError:\n self.bandNameList = None\n self.footprintsGml = None\n self.mean_sunaz = self.get_xml_tag(xml_tag='MEANSUNAZ')\n self.mean_sunel = self.get_xml_tag(xml_tag='MEANSUNEL')\n self.mean_sataz = self.get_xml_tag(xml_tag='MEANSATAZ')\n self.mean_satel = self.get_xml_tag(xml_tag='MEANSATEL')\n self.mean_intrack_viewangle = self.get_xml_tag(xml_tag='MEANINTRACKVIEWANGLE')\n self.mean_crosstrack_viewangle = self.get_xml_tag(xml_tag='MEANCROSSTRACKVIEWANGLE')\n self.mean_offnadir_viewangle = self.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE')\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.imd_tag.find('IMAGE').find(xml_tag)\n if value is not None:\n return float(value.text)\n else:\n warnings.warn('Unable to locate {}, return None.'.format(xml_tag))\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000373", "length_bytes": 4793, "license_type": "permissive", "methods": [{"docstring": "Default DGFile initializer ---------- Parameters ---------- :param filename: string refering to NITF or TIF filename :param filename: string refering to XML filename :param logger: log file ---------- Attributes ---------- self.extension: string for raster filename extension (.nitf or .tif) self.xml_filename: string with XML filename self.imd_tag: XML object with IMD tag from XML file self.bandNameList: list of band names self.footprintsGml: footprints from XML file self.mean_sunaz: mean sun azimuth angle self.mean_sunel: mean sun elevation angle self.mean_sataz: mean satellite azimuth angle self.mean_satel: mean satellite elevation angle self.mean_intrack_viewangle: mean in track view angle", "name": "__init__", "signature": "def __init__(self, filename, xml_filename=None, logger=None)"}, {"docstring": ":param xml_tag: string refering to XML tag :return: float value from XML tag ---------- Example raster.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE') ----------", "name": "get_xml_tag", "signature": "def get_xml_tag(self, xml_tag=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020730", "prompt": "Implement the Python class `DGFile` described below.\n\nClass description:\nImplement the DGFile class.\n\nMethod signatures and docstrings:\n- def __init__(self, filename, xml_filename=None, logger=None): Default DGFile initializer ---------- Parameters ---------- :param filename: string refering to NITF or TIF filename :param filename: string refering to XML filename :param logger: log file ---------- Attributes ---------- self.extension: string for raster filename extension (.nitf or .tif) self.xml_filename: string with XML filename self.imd_tag: XML object with IMD tag from XML file self.bandNameList: list of band names self.footprintsGml: footprints from XML file self.mean_sunaz: mean sun azimuth angle self.mean_sunel: mean sun elevation angle self.mean_sataz: mean satellite azimuth angle self.mean_satel: mean satellite elevation angle self.mean_intrack_viewangle: mean in track view angle\n- def get_xml_tag(self, xml_tag=None): :param xml_tag: string refering to XML tag :return: float value from XML tag ---------- Example raster.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE') ----------", "prompted_full_text": "Implement the Python class `DGFile` described below.\n\nClass description:\nImplement the DGFile class.\n\nMethod signatures and docstrings:\n- def __init__(self, filename, xml_filename=None, logger=None): Default DGFile initializer ---------- Parameters ---------- :param filename: string refering to NITF or TIF filename :param filename: string refering to XML filename :param logger: log file ---------- Attributes ---------- self.extension: string for raster filename extension (.nitf or .tif) self.xml_filename: string with XML filename self.imd_tag: XML object with IMD tag from XML file self.bandNameList: list of band names self.footprintsGml: footprints from XML file self.mean_sunaz: mean sun azimuth angle self.mean_sunel: mean sun elevation angle self.mean_sataz: mean satellite azimuth angle self.mean_satel: mean satellite elevation angle self.mean_intrack_viewangle: mean in track view angle\n- def get_xml_tag(self, xml_tag=None): :param xml_tag: string refering to XML tag :return: float value from XML tag ---------- Example raster.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE') ----------\n\n<|skeleton|>\nclass DGFile:\n\n def __init__(self, filename, xml_filename=None, logger=None):\n \"\"\"Default DGFile initializer ---------- Parameters ---------- :param filename: string refering to NITF or TIF filename :param filename: string refering to XML filename :param logger: log file ---------- Attributes ---------- self.extension: string for raster filename extension (.nitf or .tif) self.xml_filename: string with XML filename self.imd_tag: XML object with IMD tag from XML file self.bandNameList: list of band names self.footprintsGml: footprints from XML file self.mean_sunaz: mean sun azimuth angle self.mean_sunel: mean sun elevation angle self.mean_sataz: mean satellite azimuth angle self.mean_satel: mean satellite elevation angle self.mean_intrack_viewangle: mean in track view angle\"\"\"\n <|body_0|>\n\n def get_xml_tag(self, xml_tag=None):\n \"\"\":param xml_tag: string refering to XML tag :return: float value from XML tag ---------- Example raster.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE') ----------\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n extension = os.path.splitext(filename)[1]\n if extension != '.ntf' and extension != '.tif':\n raise RuntimeError('{} is not a NITF or TIFF file'.format(filename))\n self.extension = extension\n if not xml_filename:\n xml_filename = filename.replace(self.extension, '.xml')\n if not os.path.isfile(xml_filename):\n raise RuntimeError('{} does not exist'.format(xml_filename))\n self.xml_filename = xml_filename\n tree = ET.parse(self.xml_filename)\n self.imd_tag = tree.getroot().find('IMD')\n if self.imd_tag is None:\n raise RuntimeError('Unable to locate the \"IMD\" tag in {}'.format(xml_filename))\n try:\n self.bandNameList = [n.tag for n in self.imd_tag if n.tag.startswith('BAND_')]\n except ValueError:\n self.bandNameList = None\n self.footprintsGml = None\n self.mean_sunaz = self.get_xml_tag(xml_tag='MEANSUNAZ')\n self.mean_sunel = self.get_xml_tag(xml_tag='MEANSUNEL')\n self.mean_sataz = self.get_xml_tag(xml_tag='MEANSATAZ')\n self.mean_satel = self.get_xml_tag(xml_tag='MEANSATEL')\n self.mean_intrack_viewangle = self.get_xml_tag(xml_tag='MEANINTRACKVIEWANGLE')\n self.mean_crosstrack_viewangle = self.get_xml_tag(xml_tag='MEANCROSSTRACKVIEWANGLE')\n self.mean_offnadir_viewangle = self.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE')\n<|end_body_0|>\n\n<|body_start_1|>\n value = self.imd_tag.find('IMAGE').find(xml_tag)\n if value is not None:\n return float(value.text)\n else:\n warnings.warn('Unable to locate {}, return None.'.format(xml_tag))\n return None\n<|end_body_1|>\n", "revision_id": "b559caf18ed3e12128ce4f8c9fb75e4f12df950e", "skeleton": "<|skeleton|>\nclass DGFile:\n\n def __init__(self, filename, xml_filename=None, logger=None):\n \"\"\"Default DGFile initializer ---------- Parameters ---------- :param filename: string refering to NITF or TIF filename :param filename: string refering to XML filename :param logger: log file ---------- Attributes ---------- self.extension: string for raster filename extension (.nitf or .tif) self.xml_filename: string with XML filename self.imd_tag: XML object with IMD tag from XML file self.bandNameList: list of band names self.footprintsGml: footprints from XML file self.mean_sunaz: mean sun azimuth angle self.mean_sunel: mean sun elevation angle self.mean_sataz: mean satellite azimuth angle self.mean_satel: mean satellite elevation angle self.mean_intrack_viewangle: mean in track view angle\"\"\"\n <|body_0|>\n\n def get_xml_tag(self, xml_tag=None):\n \"\"\":param xml_tag: string refering to XML tag :return: float value from XML tag ---------- Example raster.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE') ----------\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DGFile:\n def __init__(self, filename, xml_filename=None, logger=None):\n \"\"\"Default DGFile initializer ---------- Parameters ---------- :param filename: string refering to NITF or TIF filename :param filename: string refering to XML filename :param logger: log file ---------- Attributes ---------- self.extension: string for raster filename extension (.nitf or .tif) self.xml_filename: string with XML filename self.imd_tag: XML object with IMD tag from XML file self.bandNameList: list of band names self.footprintsGml: footprints from XML file self.mean_sunaz: mean sun azimuth angle self.mean_sunel: mean sun elevation angle self.mean_sataz: mean satellite azimuth angle self.mean_satel: mean satellite elevation angle self.mean_intrack_viewangle: mean in track view angle\"\"\"\n super().__init__()\n extension = os.path.splitext(filename)[1]\n if extension != '.ntf' and extension != '.tif':\n raise RuntimeError('{} is not a NITF or TIFF file'.format(filename))\n self.extension = extension\n if not xml_filename:\n xml_filename = filename.replace(self.extension, '.xml')\n if not os.path.isfile(xml_filename):\n raise RuntimeError('{} does not exist'.format(xml_filename))\n self.xml_filename = xml_filename\n tree = ET.parse(self.xml_filename)\n self.imd_tag = tree.getroot().find('IMD')\n if self.imd_tag is None:\n raise RuntimeError('Unable to locate the \"IMD\" tag in {}'.format(xml_filename))\n try:\n self.bandNameList = [n.tag for n in self.imd_tag if n.tag.startswith('BAND_')]\n except ValueError:\n self.bandNameList = None\n self.footprintsGml = None\n self.mean_sunaz = self.get_xml_tag(xml_tag='MEANSUNAZ')\n self.mean_sunel = self.get_xml_tag(xml_tag='MEANSUNEL')\n self.mean_sataz = self.get_xml_tag(xml_tag='MEANSATAZ')\n self.mean_satel = self.get_xml_tag(xml_tag='MEANSATEL')\n self.mean_intrack_viewangle = self.get_xml_tag(xml_tag='MEANINTRACKVIEWANGLE')\n self.mean_crosstrack_viewangle = self.get_xml_tag(xml_tag='MEANCROSSTRACKVIEWANGLE')\n self.mean_offnadir_viewangle = self.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE')\n\n def get_xml_tag(self, xml_tag=None):\n \"\"\":param xml_tag: string refering to XML tag :return: float value from XML tag ---------- Example raster.get_xml_tag(xml_tag='MEANOFFNADIRVIEWANGLE') ----------\"\"\"\n value = self.imd_tag.find('IMAGE').find(xml_tag)\n if value is not None:\n return float(value.text)\n else:\n warnings.warn('Unable to locate {}, return None.'.format(xml_tag))\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "terragpu/metadata/dgfile.py", "source_repo": "nasa-nccs-hpda/terragpu", "split": "test", "star_events_count": 2} {"blob_id": "5b051e233b3dfce18633c43d1695117924c8f6bf", "bodies": ["self.expression_matrix = expression_matrix\nself.gene_ref = gene_ref\nself.cell_info = cell_info\nself.gene_num, self.cell_num = expression_matrix.shape\nif len(cell_info) != self.cell_num:\n print(len(cell_info))\n print(self.cell_num)\n raise Exception('Cells Number Not Match')\nif len(gene_ref) != self.gene_num:\n print(len(gene_ref))\n print(self.gene_num)\n raise Exception('Genes Number Not Match')\nif proc is None:\n self.processed = ['read data']\nelse:\n self.processed = proc\nself.proc_func = []\nself.meta_info = meta_info\nself.meta_info['processed'] = proc", "try:\n proc_func(self)\nexcept Exception as E:\n print('process fail')\n raise E\nself.processed.append(proc_func.process)\nself.proc_func.append(proc_func)\nself.gene_num, self.cell_num = self.expression_matrix.shape\nreturn self"], "bodies_text": "<|body_start_0|>\n self.expression_matrix = expression_matrix\n self.gene_ref = gene_ref\n self.cell_info = cell_info\n self.gene_num, self.cell_num = expression_matrix.shape\n if len(cell_info) != self.cell_num:\n print(len(cell_info))\n print(self.cell_num)\n raise Exception('Cells Number Not Match')\n if len(gene_ref) != self.gene_num:\n print(len(gene_ref))\n print(self.gene_num)\n raise Exception('Genes Number Not Match')\n if proc is None:\n self.processed = ['read data']\n else:\n self.processed = proc\n self.proc_func = []\n self.meta_info = meta_info\n self.meta_info['processed'] = proc\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n proc_func(self)\n except Exception as E:\n print('process fail')\n raise E\n self.processed.append(proc_func.process)\n self.proc_func.append(proc_func)\n self.gene_num, self.cell_num = self.expression_matrix.shape\n return self\n<|end_body_1|>\n", "class_docstring": "Data object composed with expresison_matrix, gene_ref, and cell_info", "class_name": "singleCellData", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass singleCellData:\n \"\"\"Data object composed with expresison_matrix, gene_ref, and cell_info\"\"\"\n\n def __init__(self, expression_matrix, gene_ref, cell_info, meta_info, proc=None):\n \"\"\"warper of singleCell data\"\"\"\n <|body_0|>\n\n def apply_proc(self, proc_func):\n \"\"\"save process infomation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.expression_matrix = expression_matrix\n self.gene_ref = gene_ref\n self.cell_info = cell_info\n self.gene_num, self.cell_num = expression_matrix.shape\n if len(cell_info) != self.cell_num:\n print(len(cell_info))\n print(self.cell_num)\n raise Exception('Cells Number Not Match')\n if len(gene_ref) != self.gene_num:\n print(len(gene_ref))\n print(self.gene_num)\n raise Exception('Genes Number Not Match')\n if proc is None:\n self.processed = ['read data']\n else:\n self.processed = proc\n self.proc_func = []\n self.meta_info = meta_info\n self.meta_info['processed'] = proc\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n proc_func(self)\n except Exception as E:\n print('process fail')\n raise E\n self.processed.append(proc_func.process)\n self.proc_func.append(proc_func)\n self.gene_num, self.cell_num = self.expression_matrix.shape\n return self\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000374", "length_bytes": 7297, "license_type": "permissive", "methods": [{"docstring": "warper of singleCell data", "name": "__init__", "signature": "def __init__(self, expression_matrix, gene_ref, cell_info, meta_info, proc=None)"}, {"docstring": "save process infomation", "name": "apply_proc", "signature": "def apply_proc(self, proc_func)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_002221", "prompt": "Implement the Python class `singleCellData` described below.\n\nClass description:\nData object composed with expresison_matrix, gene_ref, and cell_info\n\nMethod signatures and docstrings:\n- def __init__(self, expression_matrix, gene_ref, cell_info, meta_info, proc=None): warper of singleCell data\n- def apply_proc(self, proc_func): save process infomation", "prompted_full_text": "Implement the Python class `singleCellData` described below.\n\nClass description:\nData object composed with expresison_matrix, gene_ref, and cell_info\n\nMethod signatures and docstrings:\n- def __init__(self, expression_matrix, gene_ref, cell_info, meta_info, proc=None): warper of singleCell data\n- def apply_proc(self, proc_func): save process infomation\n\n<|skeleton|>\nclass singleCellData:\n \"\"\"Data object composed with expresison_matrix, gene_ref, and cell_info\"\"\"\n\n def __init__(self, expression_matrix, gene_ref, cell_info, meta_info, proc=None):\n \"\"\"warper of singleCell data\"\"\"\n <|body_0|>\n\n def apply_proc(self, proc_func):\n \"\"\"save process infomation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.expression_matrix = expression_matrix\n self.gene_ref = gene_ref\n self.cell_info = cell_info\n self.gene_num, self.cell_num = expression_matrix.shape\n if len(cell_info) != self.cell_num:\n print(len(cell_info))\n print(self.cell_num)\n raise Exception('Cells Number Not Match')\n if len(gene_ref) != self.gene_num:\n print(len(gene_ref))\n print(self.gene_num)\n raise Exception('Genes Number Not Match')\n if proc is None:\n self.processed = ['read data']\n else:\n self.processed = proc\n self.proc_func = []\n self.meta_info = meta_info\n self.meta_info['processed'] = proc\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n proc_func(self)\n except Exception as E:\n print('process fail')\n raise E\n self.processed.append(proc_func.process)\n self.proc_func.append(proc_func)\n self.gene_num, self.cell_num = self.expression_matrix.shape\n return self\n<|end_body_1|>\n", "revision_id": "4ab7335468838c0db56294d7a23ee0d1bdec55ea", "skeleton": "<|skeleton|>\nclass singleCellData:\n \"\"\"Data object composed with expresison_matrix, gene_ref, and cell_info\"\"\"\n\n def __init__(self, expression_matrix, gene_ref, cell_info, meta_info, proc=None):\n \"\"\"warper of singleCell data\"\"\"\n <|body_0|>\n\n def apply_proc(self, proc_func):\n \"\"\"save process infomation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class singleCellData:\n \"\"\"Data object composed with expresison_matrix, gene_ref, and cell_info\"\"\"\n\n def __init__(self, expression_matrix, gene_ref, cell_info, meta_info, proc=None):\n \"\"\"warper of singleCell data\"\"\"\n self.expression_matrix = expression_matrix\n self.gene_ref = gene_ref\n self.cell_info = cell_info\n self.gene_num, self.cell_num = expression_matrix.shape\n if len(cell_info) != self.cell_num:\n print(len(cell_info))\n print(self.cell_num)\n raise Exception('Cells Number Not Match')\n if len(gene_ref) != self.gene_num:\n print(len(gene_ref))\n print(self.gene_num)\n raise Exception('Genes Number Not Match')\n if proc is None:\n self.processed = ['read data']\n else:\n self.processed = proc\n self.proc_func = []\n self.meta_info = meta_info\n self.meta_info['processed'] = proc\n\n def apply_proc(self, proc_func):\n \"\"\"save process infomation\"\"\"\n try:\n proc_func(self)\n except Exception as E:\n print('process fail')\n raise E\n self.processed.append(proc_func.process)\n self.proc_func.append(proc_func)\n self.gene_num, self.cell_num = self.expression_matrix.shape\n return self\n", "source": "the_stack_v2_python_sparse", "source_path": "SingleAnalyst/basic.py", "source_repo": "cannedfishcan/Singleanalyst", "split": "test", "star_events_count": 0} {"blob_id": "d9f6a35397f6ce6322b9c00943e454a47cf8b79a", "bodies": ["super().__init__()\nself._Z = Z\nself._layers, dim = (nn.ModuleList(), [M + self._Z] + hidden_size)\nfor d_in, d_out in zip(dim[:-1], dim[1:]):\n self._layers.append(nn.Sequential(nn.Linear(d_in, d_out), g))\nlayer = nn.Sequential(nn.Linear(dim[-1], M), nn.Sigmoid())\nself._layers.append(layer)", "if z is None:\n num_ele = m.shape[0]\n z = tc.FloatTensor(num_ele, self._Z)\n z.uniform_(0, 1)\no = torch.cat((m, z), dim=1)\nfor layer in self._layers:\n o = layer(o)\ng_theta = torch.max(m, o)\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n threshold = tc.FloatTensor([0.5])\nm_prime = (g_theta > threshold).float()\nreturn (m_prime, g_theta)"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self._Z = Z\n self._layers, dim = (nn.ModuleList(), [M + self._Z] + hidden_size)\n for d_in, d_out in zip(dim[:-1], dim[1:]):\n self._layers.append(nn.Sequential(nn.Linear(d_in, d_out), g))\n layer = nn.Sequential(nn.Linear(dim[-1], M), nn.Sigmoid())\n self._layers.append(layer)\n<|end_body_0|>\n\n<|body_start_1|>\n if z is None:\n num_ele = m.shape[0]\n z = tc.FloatTensor(num_ele, self._Z)\n z.uniform_(0, 1)\n o = torch.cat((m, z), dim=1)\n for layer in self._layers:\n o = layer(o)\n g_theta = torch.max(m, o)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n threshold = tc.FloatTensor([0.5])\n m_prime = (g_theta > threshold).float()\n return (m_prime, g_theta)\n<|end_body_1|>\n", "class_docstring": "MalGAN generator block", "class_name": "Generator", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Generator:\n \"\"\"MalGAN generator block\"\"\"\n\n def __init__(self, M: int, Z: int, hidden_size: List[int], g: nn.Module):\n \"\"\"Generator Constructor :param M: Dimension of the feature vector \\\\p m :param Z: Dimension of the noise vector \\\\p z :param hidden_size: Width of the hidden layer(s) :param g: Activation function\"\"\"\n <|body_0|>\n\n def forward(self, m: torch.Tensor, z: torch.Tensor=None) -> TensorTuple:\n \"\"\"Forward pass through the generator. Automatically generates the noise vector \\\\p z that is coupled with \\\\p m. :param m: Input vector :math:`m` :param z: Noise vector :math:`z`. If no random vector is specified, the random vector is generated within this function call via a call to \\\\p torch.rand :return: Tuple of (:math:`m'`, :math:`G_{\\\\theta_{g}}`), i.e., the output tensor with the feature predictions as well as the smoothed prediction that can be used for back-propagation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._Z = Z\n self._layers, dim = (nn.ModuleList(), [M + self._Z] + hidden_size)\n for d_in, d_out in zip(dim[:-1], dim[1:]):\n self._layers.append(nn.Sequential(nn.Linear(d_in, d_out), g))\n layer = nn.Sequential(nn.Linear(dim[-1], M), nn.Sigmoid())\n self._layers.append(layer)\n<|end_body_0|>\n\n<|body_start_1|>\n if z is None:\n num_ele = m.shape[0]\n z = tc.FloatTensor(num_ele, self._Z)\n z.uniform_(0, 1)\n o = torch.cat((m, z), dim=1)\n for layer in self._layers:\n o = layer(o)\n g_theta = torch.max(m, o)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n threshold = tc.FloatTensor([0.5])\n m_prime = (g_theta > threshold).float()\n return (m_prime, g_theta)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000375", "length_bytes": 2566, "license_type": "permissive", "methods": [{"docstring": "Generator Constructor :param M: Dimension of the feature vector \\\\p m :param Z: Dimension of the noise vector \\\\p z :param hidden_size: Width of the hidden layer(s) :param g: Activation function", "name": "__init__", "signature": "def __init__(self, M: int, Z: int, hidden_size: List[int], g: nn.Module)"}, {"docstring": "Forward pass through the generator. Automatically generates the noise vector \\\\p z that is coupled with \\\\p m. :param m: Input vector :math:`m` :param z: Noise vector :math:`z`. If no random vector is specified, the random vector is generated within this function call via a call to \\\\p torch.rand :return: Tuple of (:math:`m'`, :math:`G_{\\\\theta_{g}}`), i.e., the output tensor with the feature predictions as well as the smoothed prediction that can be used for back-propagation.", "name": "forward", "signature": "def forward(self, m: torch.Tensor, z: torch.Tensor=None) -> TensorTuple"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_038469", "prompt": "Implement the Python class `Generator` described below.\n\nClass description:\nMalGAN generator block\n\nMethod signatures and docstrings:\n- def __init__(self, M: int, Z: int, hidden_size: List[int], g: nn.Module): Generator Constructor :param M: Dimension of the feature vector \\\\p m :param Z: Dimension of the noise vector \\\\p z :param hidden_size: Width of the hidden layer(s) :param g: Activation function\n- def forward(self, m: torch.Tensor, z: torch.Tensor=None) -> TensorTuple: Forward pass through the generator. Automatically generates the noise vector \\\\p z that is coupled with \\\\p m. :param m: Input vector :math:`m` :param z: Noise vector :math:`z`. If no random vector is specified, the random vector is generated within this function call via a call to \\\\p torch.rand :return: Tuple of (:math:`m'`, :math:`G_{\\\\theta_{g}}`), i.e., the output tensor with the feature predictions as well as the smoothed prediction that can be used for back-propagation.", "prompted_full_text": "Implement the Python class `Generator` described below.\n\nClass description:\nMalGAN generator block\n\nMethod signatures and docstrings:\n- def __init__(self, M: int, Z: int, hidden_size: List[int], g: nn.Module): Generator Constructor :param M: Dimension of the feature vector \\\\p m :param Z: Dimension of the noise vector \\\\p z :param hidden_size: Width of the hidden layer(s) :param g: Activation function\n- def forward(self, m: torch.Tensor, z: torch.Tensor=None) -> TensorTuple: Forward pass through the generator. Automatically generates the noise vector \\\\p z that is coupled with \\\\p m. :param m: Input vector :math:`m` :param z: Noise vector :math:`z`. If no random vector is specified, the random vector is generated within this function call via a call to \\\\p torch.rand :return: Tuple of (:math:`m'`, :math:`G_{\\\\theta_{g}}`), i.e., the output tensor with the feature predictions as well as the smoothed prediction that can be used for back-propagation.\n\n<|skeleton|>\nclass Generator:\n \"\"\"MalGAN generator block\"\"\"\n\n def __init__(self, M: int, Z: int, hidden_size: List[int], g: nn.Module):\n \"\"\"Generator Constructor :param M: Dimension of the feature vector \\\\p m :param Z: Dimension of the noise vector \\\\p z :param hidden_size: Width of the hidden layer(s) :param g: Activation function\"\"\"\n <|body_0|>\n\n def forward(self, m: torch.Tensor, z: torch.Tensor=None) -> TensorTuple:\n \"\"\"Forward pass through the generator. Automatically generates the noise vector \\\\p z that is coupled with \\\\p m. :param m: Input vector :math:`m` :param z: Noise vector :math:`z`. If no random vector is specified, the random vector is generated within this function call via a call to \\\\p torch.rand :return: Tuple of (:math:`m'`, :math:`G_{\\\\theta_{g}}`), i.e., the output tensor with the feature predictions as well as the smoothed prediction that can be used for back-propagation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self._Z = Z\n self._layers, dim = (nn.ModuleList(), [M + self._Z] + hidden_size)\n for d_in, d_out in zip(dim[:-1], dim[1:]):\n self._layers.append(nn.Sequential(nn.Linear(d_in, d_out), g))\n layer = nn.Sequential(nn.Linear(dim[-1], M), nn.Sigmoid())\n self._layers.append(layer)\n<|end_body_0|>\n\n<|body_start_1|>\n if z is None:\n num_ele = m.shape[0]\n z = tc.FloatTensor(num_ele, self._Z)\n z.uniform_(0, 1)\n o = torch.cat((m, z), dim=1)\n for layer in self._layers:\n o = layer(o)\n g_theta = torch.max(m, o)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n threshold = tc.FloatTensor([0.5])\n m_prime = (g_theta > threshold).float()\n return (m_prime, g_theta)\n<|end_body_1|>\n", "revision_id": "c36647d1b3ba86a9a4e6e1a0bda2a371d8875781", "skeleton": "<|skeleton|>\nclass Generator:\n \"\"\"MalGAN generator block\"\"\"\n\n def __init__(self, M: int, Z: int, hidden_size: List[int], g: nn.Module):\n \"\"\"Generator Constructor :param M: Dimension of the feature vector \\\\p m :param Z: Dimension of the noise vector \\\\p z :param hidden_size: Width of the hidden layer(s) :param g: Activation function\"\"\"\n <|body_0|>\n\n def forward(self, m: torch.Tensor, z: torch.Tensor=None) -> TensorTuple:\n \"\"\"Forward pass through the generator. Automatically generates the noise vector \\\\p z that is coupled with \\\\p m. :param m: Input vector :math:`m` :param z: Noise vector :math:`z`. If no random vector is specified, the random vector is generated within this function call via a call to \\\\p torch.rand :return: Tuple of (:math:`m'`, :math:`G_{\\\\theta_{g}}`), i.e., the output tensor with the feature predictions as well as the smoothed prediction that can be used for back-propagation.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Generator:\n \"\"\"MalGAN generator block\"\"\"\n\n def __init__(self, M: int, Z: int, hidden_size: List[int], g: nn.Module):\n \"\"\"Generator Constructor :param M: Dimension of the feature vector \\\\p m :param Z: Dimension of the noise vector \\\\p z :param hidden_size: Width of the hidden layer(s) :param g: Activation function\"\"\"\n super().__init__()\n self._Z = Z\n self._layers, dim = (nn.ModuleList(), [M + self._Z] + hidden_size)\n for d_in, d_out in zip(dim[:-1], dim[1:]):\n self._layers.append(nn.Sequential(nn.Linear(d_in, d_out), g))\n layer = nn.Sequential(nn.Linear(dim[-1], M), nn.Sigmoid())\n self._layers.append(layer)\n\n def forward(self, m: torch.Tensor, z: torch.Tensor=None) -> TensorTuple:\n \"\"\"Forward pass through the generator. Automatically generates the noise vector \\\\p z that is coupled with \\\\p m. :param m: Input vector :math:`m` :param z: Noise vector :math:`z`. If no random vector is specified, the random vector is generated within this function call via a call to \\\\p torch.rand :return: Tuple of (:math:`m'`, :math:`G_{\\\\theta_{g}}`), i.e., the output tensor with the feature predictions as well as the smoothed prediction that can be used for back-propagation.\"\"\"\n if z is None:\n num_ele = m.shape[0]\n z = tc.FloatTensor(num_ele, self._Z)\n z.uniform_(0, 1)\n o = torch.cat((m, z), dim=1)\n for layer in self._layers:\n o = layer(o)\n g_theta = torch.max(m, o)\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n threshold = tc.FloatTensor([0.5])\n m_prime = (g_theta > threshold).float()\n return (m_prime, g_theta)\n", "source": "the_stack_v2_python_sparse", "source_path": "malgan/generator.py", "source_repo": "CyberForce/Pesidious", "split": "test", "star_events_count": 119} {"blob_id": "7eaf3e36f8ee9b5fc0b58c933795f9eddc60af64", "bodies": ["size = len(self)\nidx = operator.index(idx)\nif not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\nidx %= size\nreturn next(itertools.islice(iterator, idx, None))", "if isinstance(idx, str):\n return self._sub_layers[idx]\nelif isinstance(idx, slice):\n return self.__class__(*list(self._sub_layers.items())[idx])\nelse:\n return self._get_item_by_idx(self._sub_layers.values(), idx)", "if isinstance(idx, str):\n return setattr(self, str(idx), layer)\nelse:\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n return setattr(self, key, layer)", "if isinstance(idx, slice):\n for key in list(self._sub_layers.keys())[idx]:\n delattr(self, key)\nelif isinstance(idx, int):\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n delattr(self, key)\nelse:\n delattr(self, idx)"], "bodies_text": "<|body_start_0|>\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\n idx %= size\n return next(itertools.islice(iterator, idx, None))\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(idx, str):\n return self._sub_layers[idx]\n elif isinstance(idx, slice):\n return self.__class__(*list(self._sub_layers.items())[idx])\n else:\n return self._get_item_by_idx(self._sub_layers.values(), idx)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(idx, str):\n return setattr(self, str(idx), layer)\n else:\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n return setattr(self, key, layer)\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(idx, slice):\n for key in list(self._sub_layers.keys())[idx]:\n delattr(self, key)\n elif isinstance(idx, int):\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n delattr(self, key)\n else:\n delattr(self, idx)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Sequential", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Sequential:\n\n def _get_item_by_idx(self, iterator, idx):\n \"\"\"Get the idx-th item of the iterator\"\"\"\n <|body_0|>\n\n def __getitem__(self, idx: Union[slice, int, str]):\n \"\"\"get mm is sequential instance mm[1] mm[-1] mm[1:] mm['L1']\"\"\"\n <|body_1|>\n\n def __setitem__(self, idx: Union[int, str], layer: paddle.nn.Layer) -> None:\n \"\"\"set mm is sequential instance mm[1] = `Layer Instance` mm['L1'] = `Layer Instance`\"\"\"\n <|body_2|>\n\n def __delitem__(self, idx: Union[slice, int, str]) -> None:\n \"\"\"del mm is sequential instance del mm[1] del mm[-1] del mm[1:] del mm['L1']\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\n idx %= size\n return next(itertools.islice(iterator, idx, None))\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(idx, str):\n return self._sub_layers[idx]\n elif isinstance(idx, slice):\n return self.__class__(*list(self._sub_layers.items())[idx])\n else:\n return self._get_item_by_idx(self._sub_layers.values(), idx)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(idx, str):\n return setattr(self, str(idx), layer)\n else:\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n return setattr(self, key, layer)\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(idx, slice):\n for key in list(self._sub_layers.keys())[idx]:\n delattr(self, key)\n elif isinstance(idx, int):\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n delattr(self, key)\n else:\n delattr(self, idx)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000376", "length_bytes": 2919, "license_type": "no_license", "methods": [{"docstring": "Get the idx-th item of the iterator", "name": "_get_item_by_idx", "signature": "def _get_item_by_idx(self, iterator, idx)"}, {"docstring": "get mm is sequential instance mm[1] mm[-1] mm[1:] mm['L1']", "name": "__getitem__", "signature": "def __getitem__(self, idx: Union[slice, int, str])"}, {"docstring": "set mm is sequential instance mm[1] = `Layer Instance` mm['L1'] = `Layer Instance`", "name": "__setitem__", "signature": "def __setitem__(self, idx: Union[int, str], layer: paddle.nn.Layer) -> None"}, {"docstring": "del mm is sequential instance del mm[1] del mm[-1] del mm[1:] del mm['L1']", "name": "__delitem__", "signature": "def __delitem__(self, idx: Union[slice, int, str]) -> None"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_014621", "prompt": "Implement the Python class `Sequential` described below.\n\nClass description:\nImplement the Sequential class.\n\nMethod signatures and docstrings:\n- def _get_item_by_idx(self, iterator, idx): Get the idx-th item of the iterator\n- def __getitem__(self, idx: Union[slice, int, str]): get mm is sequential instance mm[1] mm[-1] mm[1:] mm['L1']\n- def __setitem__(self, idx: Union[int, str], layer: paddle.nn.Layer) -> None: set mm is sequential instance mm[1] = `Layer Instance` mm['L1'] = `Layer Instance`\n- def __delitem__(self, idx: Union[slice, int, str]) -> None: del mm is sequential instance del mm[1] del mm[-1] del mm[1:] del mm['L1']", "prompted_full_text": "Implement the Python class `Sequential` described below.\n\nClass description:\nImplement the Sequential class.\n\nMethod signatures and docstrings:\n- def _get_item_by_idx(self, iterator, idx): Get the idx-th item of the iterator\n- def __getitem__(self, idx: Union[slice, int, str]): get mm is sequential instance mm[1] mm[-1] mm[1:] mm['L1']\n- def __setitem__(self, idx: Union[int, str], layer: paddle.nn.Layer) -> None: set mm is sequential instance mm[1] = `Layer Instance` mm['L1'] = `Layer Instance`\n- def __delitem__(self, idx: Union[slice, int, str]) -> None: del mm is sequential instance del mm[1] del mm[-1] del mm[1:] del mm['L1']\n\n<|skeleton|>\nclass Sequential:\n\n def _get_item_by_idx(self, iterator, idx):\n \"\"\"Get the idx-th item of the iterator\"\"\"\n <|body_0|>\n\n def __getitem__(self, idx: Union[slice, int, str]):\n \"\"\"get mm is sequential instance mm[1] mm[-1] mm[1:] mm['L1']\"\"\"\n <|body_1|>\n\n def __setitem__(self, idx: Union[int, str], layer: paddle.nn.Layer) -> None:\n \"\"\"set mm is sequential instance mm[1] = `Layer Instance` mm['L1'] = `Layer Instance`\"\"\"\n <|body_2|>\n\n def __delitem__(self, idx: Union[slice, int, str]) -> None:\n \"\"\"del mm is sequential instance del mm[1] del mm[-1] del mm[1:] del mm['L1']\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\n idx %= size\n return next(itertools.islice(iterator, idx, None))\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(idx, str):\n return self._sub_layers[idx]\n elif isinstance(idx, slice):\n return self.__class__(*list(self._sub_layers.items())[idx])\n else:\n return self._get_item_by_idx(self._sub_layers.values(), idx)\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(idx, str):\n return setattr(self, str(idx), layer)\n else:\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n return setattr(self, key, layer)\n<|end_body_2|>\n\n<|body_start_3|>\n if isinstance(idx, slice):\n for key in list(self._sub_layers.keys())[idx]:\n delattr(self, key)\n elif isinstance(idx, int):\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n delattr(self, key)\n else:\n delattr(self, idx)\n<|end_body_3|>\n", "revision_id": "353e7abfa7b02b45d2b7fec096b58e07651eb71d", "skeleton": "<|skeleton|>\nclass Sequential:\n\n def _get_item_by_idx(self, iterator, idx):\n \"\"\"Get the idx-th item of the iterator\"\"\"\n <|body_0|>\n\n def __getitem__(self, idx: Union[slice, int, str]):\n \"\"\"get mm is sequential instance mm[1] mm[-1] mm[1:] mm['L1']\"\"\"\n <|body_1|>\n\n def __setitem__(self, idx: Union[int, str], layer: paddle.nn.Layer) -> None:\n \"\"\"set mm is sequential instance mm[1] = `Layer Instance` mm['L1'] = `Layer Instance`\"\"\"\n <|body_2|>\n\n def __delitem__(self, idx: Union[slice, int, str]) -> None:\n \"\"\"del mm is sequential instance del mm[1] del mm[-1] del mm[1:] del mm['L1']\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Sequential:\n def _get_item_by_idx(self, iterator, idx):\n \"\"\"Get the idx-th item of the iterator\"\"\"\n size = len(self)\n idx = operator.index(idx)\n if not -size <= idx < size:\n raise IndexError('index {} is out of range'.format(idx))\n idx %= size\n return next(itertools.islice(iterator, idx, None))\n\n def __getitem__(self, idx: Union[slice, int, str]):\n \"\"\"get mm is sequential instance mm[1] mm[-1] mm[1:] mm['L1']\"\"\"\n if isinstance(idx, str):\n return self._sub_layers[idx]\n elif isinstance(idx, slice):\n return self.__class__(*list(self._sub_layers.items())[idx])\n else:\n return self._get_item_by_idx(self._sub_layers.values(), idx)\n\n def __setitem__(self, idx: Union[int, str], layer: paddle.nn.Layer) -> None:\n \"\"\"set mm is sequential instance mm[1] = `Layer Instance` mm['L1'] = `Layer Instance`\"\"\"\n if isinstance(idx, str):\n return setattr(self, str(idx), layer)\n else:\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n return setattr(self, key, layer)\n\n def __delitem__(self, idx: Union[slice, int, str]) -> None:\n \"\"\"del mm is sequential instance del mm[1] del mm[-1] del mm[1:] del mm['L1']\"\"\"\n if isinstance(idx, slice):\n for key in list(self._sub_layers.keys())[idx]:\n delattr(self, key)\n elif isinstance(idx, int):\n key = self._get_item_by_idx(self._sub_layers.keys(), idx)\n delattr(self, key)\n else:\n delattr(self, idx)\n", "source": "the_stack_v2_python_sparse", "source_path": "pp/paddle/container.py", "source_repo": "js-ts/AI", "split": "test", "star_events_count": 0} {"blob_id": "d492e81a2db18bc848c6b184fa503cf514a7d001", "bodies": ["self.time = 0\nself.time_old = 0\nif initial_current < 0:\n initial_current = 0\nif initial_voltage < 0:\n initial_voltage = 0\nself.current_old = initial_current\nself.voltage_old = initial_voltage\nself.current = initial_current\nself.voltage = initial_voltage", "self.time_old = self.time\nif time_new < 0:\n self.time = self.time\nelse:\n self.time = time_new\nself.current_old = self.current\nif new_current < 0:\n self.current = self.current\nelse:\n self.current = new_current\nself.voltage_old = self.voltage\nif new_voltage < 0:\n self.voltage = self.voltage\nelse:\n self.voltage = new_voltage\nenergy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\nreturn energy_consumed"], "bodies_text": "<|body_start_0|>\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n<|end_body_0|>\n\n<|body_start_1|>\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AuxSystem", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuxSystem:\n\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n <|body_0|>\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n<|end_body_0|>\n\n<|body_start_1|>\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000377", "length_bytes": 1950, "license_type": "no_license", "methods": [{"docstring": "Initialize Auxiliary System object", "name": "__init__", "signature": "def __init__(self, initial_current, initial_voltage)"}, {"docstring": "Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)", "name": "energy_consumed", "signature": "def energy_consumed(self, time_new, new_current, new_voltage)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_044587", "prompt": "Implement the Python class `AuxSystem` described below.\n\nClass description:\nImplement the AuxSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, initial_current, initial_voltage): Initialize Auxiliary System object\n- def energy_consumed(self, time_new, new_current, new_voltage): Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)", "prompted_full_text": "Implement the Python class `AuxSystem` described below.\n\nClass description:\nImplement the AuxSystem class.\n\nMethod signatures and docstrings:\n- def __init__(self, initial_current, initial_voltage): Initialize Auxiliary System object\n- def energy_consumed(self, time_new, new_current, new_voltage): Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\n\n<|skeleton|>\nclass AuxSystem:\n\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n <|body_0|>\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n<|end_body_0|>\n\n<|body_start_1|>\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n<|end_body_1|>\n", "revision_id": "73cb4b6e42d8b0b83ccde98affb6bc8abd13549b", "skeleton": "<|skeleton|>\nclass AuxSystem:\n\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n <|body_0|>\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AuxSystem:\n def __init__(self, initial_current, initial_voltage):\n \"\"\"Initialize Auxiliary System object\"\"\"\n self.time = 0\n self.time_old = 0\n if initial_current < 0:\n initial_current = 0\n if initial_voltage < 0:\n initial_voltage = 0\n self.current_old = initial_current\n self.voltage_old = initial_voltage\n self.current = initial_current\n self.voltage = initial_voltage\n\n def energy_consumed(self, time_new, new_current, new_voltage):\n \"\"\"Calculate energy consumed by Auxiliary system. Power (W) = Current (A) x Voltage (V) Energy (J) = Power (W) x Time (s) Energy = Current (A) x Voltage (V) x Time (s)\"\"\"\n self.time_old = self.time\n if time_new < 0:\n self.time = self.time\n else:\n self.time = time_new\n self.current_old = self.current\n if new_current < 0:\n self.current = self.current\n else:\n self.current = new_current\n self.voltage_old = self.voltage\n if new_voltage < 0:\n self.voltage = self.voltage\n else:\n self.voltage = new_voltage\n energy_consumed = (self.current + self.current_old) / 2 * ((self.voltage + self.voltage_old) / 2) * (self.time - self.time_old)\n return energy_consumed\n", "source": "the_stack_v2_python_sparse", "source_path": "auxloss/auxsystem.py", "source_repo": "uw-midsun/strategy", "split": "test", "star_events_count": 19} {"blob_id": "4cd8b858327553c6f121ce50b44ec1653d08e330", "bodies": ["self.stdout.write('Re-assigining started', ending='\\n')\nif not args:\n raise CommandError('Param not set. ')\nif len(args) < 3:\n raise CommandError('Param not set. ')\napp = args[0]\nmodel = args[1]\nusername = args[2]\nnew_perms = list(args[3:])\nif username == 'all':\n users = User.objects.exclude(username__iexact=settings.ANONYMOUS_DEFAULT_USERNAME)\n teams = Team.objects.all()\nelse:\n users = User.objects.filter(username=username)\n teams = Team.objects.filter(organization__username=username)\nfor user in queryset_iterator(users):\n self.reassign_perms(user, app, model, new_perms)\nfor team in queryset_iterator(teams):\n self.reassign_perms(team, app, model, new_perms)\nself.stdout.write('Re-assigining finished', ending='\\n')", "if isinstance(user, Team):\n if model == 'project':\n objects = user.projectgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n else:\n objects = user.xformgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\nelif model == 'project':\n objects = user.projectuserobjectpermission_set.all()\nelse:\n objects = user.xformuserobjectpermission_set.all()\nfor perm_obj in objects:\n obj = perm_obj.content_object\n roles = [ReadOnlyRoleNoDownload, ReadOnlyRole, DataEntryOnlyRole, DataEntryMinorRole, DataEntryRole, EditorMinorRole, EditorRole, ManagerRole, OwnerRole]\n for role_class in reversed(roles):\n if self.check_role(role_class, user, obj, new_perm):\n role_class.add(user, obj)\n break", "new_perm = [] if new_perm is None else new_perm\nperm_list = role_class.class_to_permissions[type(obj)]\nold_perm_set = set(perm_list)\nnewly_added_perm = set(new_perm)\nif newly_added_perm.issubset(old_perm_set):\n diff_set = old_perm_set.difference(newly_added_perm)\n if isinstance(user, Team):\n return set(get_perms(user, obj)) == diff_set\n return user.has_perms(list(diff_set), obj)\nreturn False"], "bodies_text": "<|body_start_0|>\n self.stdout.write('Re-assigining started', ending='\\n')\n if not args:\n raise CommandError('Param not set. ')\n if len(args) < 3:\n raise CommandError('Param not set. ')\n app = args[0]\n model = args[1]\n username = args[2]\n new_perms = list(args[3:])\n if username == 'all':\n users = User.objects.exclude(username__iexact=settings.ANONYMOUS_DEFAULT_USERNAME)\n teams = Team.objects.all()\n else:\n users = User.objects.filter(username=username)\n teams = Team.objects.filter(organization__username=username)\n for user in queryset_iterator(users):\n self.reassign_perms(user, app, model, new_perms)\n for team in queryset_iterator(teams):\n self.reassign_perms(team, app, model, new_perms)\n self.stdout.write('Re-assigining finished', ending='\\n')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(user, Team):\n if model == 'project':\n objects = user.projectgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n else:\n objects = user.xformgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n elif model == 'project':\n objects = user.projectuserobjectpermission_set.all()\n else:\n objects = user.xformuserobjectpermission_set.all()\n for perm_obj in objects:\n obj = perm_obj.content_object\n roles = [ReadOnlyRoleNoDownload, ReadOnlyRole, DataEntryOnlyRole, DataEntryMinorRole, DataEntryRole, EditorMinorRole, EditorRole, ManagerRole, OwnerRole]\n for role_class in reversed(roles):\n if self.check_role(role_class, user, obj, new_perm):\n role_class.add(user, obj)\n break\n<|end_body_1|>\n\n<|body_start_2|>\n new_perm = [] if new_perm is None else new_perm\n perm_list = role_class.class_to_permissions[type(obj)]\n old_perm_set = set(perm_list)\n newly_added_perm = set(new_perm)\n if newly_added_perm.issubset(old_perm_set):\n diff_set = old_perm_set.difference(newly_added_perm)\n if isinstance(user, Team):\n return set(get_perms(user, obj)) == diff_set\n return user.has_perms(list(diff_set), obj)\n return False\n<|end_body_2|>\n", "class_docstring": "Reassign permission to the model when permissions are changed", "class_name": "Command", "detected_licenses": ["BSD-2-Clause", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Command:\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n <|body_0|>\n\n def reassign_perms(self, user, app, model, new_perm):\n \"\"\"Gets all the permissions the user has on objects and assigns the new permission to them :param user: :param app: :param model: :param new_perm: :return:\"\"\"\n <|body_1|>\n\n def check_role(self, role_class, user, obj, new_perm=None):\n \"\"\"Test if the user has the role for the object provided :param role_class: :param user: :param obj: :param new_perm: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.stdout.write('Re-assigining started', ending='\\n')\n if not args:\n raise CommandError('Param not set. ')\n if len(args) < 3:\n raise CommandError('Param not set. ')\n app = args[0]\n model = args[1]\n username = args[2]\n new_perms = list(args[3:])\n if username == 'all':\n users = User.objects.exclude(username__iexact=settings.ANONYMOUS_DEFAULT_USERNAME)\n teams = Team.objects.all()\n else:\n users = User.objects.filter(username=username)\n teams = Team.objects.filter(organization__username=username)\n for user in queryset_iterator(users):\n self.reassign_perms(user, app, model, new_perms)\n for team in queryset_iterator(teams):\n self.reassign_perms(team, app, model, new_perms)\n self.stdout.write('Re-assigining finished', ending='\\n')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(user, Team):\n if model == 'project':\n objects = user.projectgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n else:\n objects = user.xformgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n elif model == 'project':\n objects = user.projectuserobjectpermission_set.all()\n else:\n objects = user.xformuserobjectpermission_set.all()\n for perm_obj in objects:\n obj = perm_obj.content_object\n roles = [ReadOnlyRoleNoDownload, ReadOnlyRole, DataEntryOnlyRole, DataEntryMinorRole, DataEntryRole, EditorMinorRole, EditorRole, ManagerRole, OwnerRole]\n for role_class in reversed(roles):\n if self.check_role(role_class, user, obj, new_perm):\n role_class.add(user, obj)\n break\n<|end_body_1|>\n\n<|body_start_2|>\n new_perm = [] if new_perm is None else new_perm\n perm_list = role_class.class_to_permissions[type(obj)]\n old_perm_set = set(perm_list)\n newly_added_perm = set(new_perm)\n if newly_added_perm.issubset(old_perm_set):\n diff_set = old_perm_set.difference(newly_added_perm)\n if isinstance(user, Team):\n return set(get_perms(user, obj)) == diff_set\n return user.has_perms(list(diff_set), obj)\n return False\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000378", "length_bytes": 4631, "license_type": "permissive", "methods": [{"docstring": "Reassign permission to the model when permissions are changed", "name": "handle", "signature": "def handle(self, *args, **options)"}, {"docstring": "Gets all the permissions the user has on objects and assigns the new permission to them :param user: :param app: :param model: :param new_perm: :return:", "name": "reassign_perms", "signature": "def reassign_perms(self, user, app, model, new_perm)"}, {"docstring": "Test if the user has the role for the object provided :param role_class: :param user: :param obj: :param new_perm: :return:", "name": "check_role", "signature": "def check_role(self, role_class, user, obj, new_perm=None)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_034973", "prompt": "Implement the Python class `Command` described below.\n\nClass description:\nReassign permission to the model when permissions are changed\n\nMethod signatures and docstrings:\n- def handle(self, *args, **options): Reassign permission to the model when permissions are changed\n- def reassign_perms(self, user, app, model, new_perm): Gets all the permissions the user has on objects and assigns the new permission to them :param user: :param app: :param model: :param new_perm: :return:\n- def check_role(self, role_class, user, obj, new_perm=None): Test if the user has the role for the object provided :param role_class: :param user: :param obj: :param new_perm: :return:", "prompted_full_text": "Implement the Python class `Command` described below.\n\nClass description:\nReassign permission to the model when permissions are changed\n\nMethod signatures and docstrings:\n- def handle(self, *args, **options): Reassign permission to the model when permissions are changed\n- def reassign_perms(self, user, app, model, new_perm): Gets all the permissions the user has on objects and assigns the new permission to them :param user: :param app: :param model: :param new_perm: :return:\n- def check_role(self, role_class, user, obj, new_perm=None): Test if the user has the role for the object provided :param role_class: :param user: :param obj: :param new_perm: :return:\n\n<|skeleton|>\nclass Command:\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n <|body_0|>\n\n def reassign_perms(self, user, app, model, new_perm):\n \"\"\"Gets all the permissions the user has on objects and assigns the new permission to them :param user: :param app: :param model: :param new_perm: :return:\"\"\"\n <|body_1|>\n\n def check_role(self, role_class, user, obj, new_perm=None):\n \"\"\"Test if the user has the role for the object provided :param role_class: :param user: :param obj: :param new_perm: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.stdout.write('Re-assigining started', ending='\\n')\n if not args:\n raise CommandError('Param not set. ')\n if len(args) < 3:\n raise CommandError('Param not set. ')\n app = args[0]\n model = args[1]\n username = args[2]\n new_perms = list(args[3:])\n if username == 'all':\n users = User.objects.exclude(username__iexact=settings.ANONYMOUS_DEFAULT_USERNAME)\n teams = Team.objects.all()\n else:\n users = User.objects.filter(username=username)\n teams = Team.objects.filter(organization__username=username)\n for user in queryset_iterator(users):\n self.reassign_perms(user, app, model, new_perms)\n for team in queryset_iterator(teams):\n self.reassign_perms(team, app, model, new_perms)\n self.stdout.write('Re-assigining finished', ending='\\n')\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(user, Team):\n if model == 'project':\n objects = user.projectgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n else:\n objects = user.xformgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n elif model == 'project':\n objects = user.projectuserobjectpermission_set.all()\n else:\n objects = user.xformuserobjectpermission_set.all()\n for perm_obj in objects:\n obj = perm_obj.content_object\n roles = [ReadOnlyRoleNoDownload, ReadOnlyRole, DataEntryOnlyRole, DataEntryMinorRole, DataEntryRole, EditorMinorRole, EditorRole, ManagerRole, OwnerRole]\n for role_class in reversed(roles):\n if self.check_role(role_class, user, obj, new_perm):\n role_class.add(user, obj)\n break\n<|end_body_1|>\n\n<|body_start_2|>\n new_perm = [] if new_perm is None else new_perm\n perm_list = role_class.class_to_permissions[type(obj)]\n old_perm_set = set(perm_list)\n newly_added_perm = set(new_perm)\n if newly_added_perm.issubset(old_perm_set):\n diff_set = old_perm_set.difference(newly_added_perm)\n if isinstance(user, Team):\n return set(get_perms(user, obj)) == diff_set\n return user.has_perms(list(diff_set), obj)\n return False\n<|end_body_2|>\n", "revision_id": "e5bdec91cb47179172b515bbcb91701262ff3377", "skeleton": "<|skeleton|>\nclass Command:\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n <|body_0|>\n\n def reassign_perms(self, user, app, model, new_perm):\n \"\"\"Gets all the permissions the user has on objects and assigns the new permission to them :param user: :param app: :param model: :param new_perm: :return:\"\"\"\n <|body_1|>\n\n def check_role(self, role_class, user, obj, new_perm=None):\n \"\"\"Test if the user has the role for the object provided :param role_class: :param user: :param obj: :param new_perm: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Command:\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n\n def handle(self, *args, **options):\n \"\"\"Reassign permission to the model when permissions are changed\"\"\"\n self.stdout.write('Re-assigining started', ending='\\n')\n if not args:\n raise CommandError('Param not set. ')\n if len(args) < 3:\n raise CommandError('Param not set. ')\n app = args[0]\n model = args[1]\n username = args[2]\n new_perms = list(args[3:])\n if username == 'all':\n users = User.objects.exclude(username__iexact=settings.ANONYMOUS_DEFAULT_USERNAME)\n teams = Team.objects.all()\n else:\n users = User.objects.filter(username=username)\n teams = Team.objects.filter(organization__username=username)\n for user in queryset_iterator(users):\n self.reassign_perms(user, app, model, new_perms)\n for team in queryset_iterator(teams):\n self.reassign_perms(team, app, model, new_perms)\n self.stdout.write('Re-assigining finished', ending='\\n')\n\n def reassign_perms(self, user, app, model, new_perm):\n \"\"\"Gets all the permissions the user has on objects and assigns the new permission to them :param user: :param app: :param model: :param new_perm: :return:\"\"\"\n if isinstance(user, Team):\n if model == 'project':\n objects = user.projectgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n else:\n objects = user.xformgroupobjectpermission_set.filter(group_id=user.pk).distinct('content_object_id')\n elif model == 'project':\n objects = user.projectuserobjectpermission_set.all()\n else:\n objects = user.xformuserobjectpermission_set.all()\n for perm_obj in objects:\n obj = perm_obj.content_object\n roles = [ReadOnlyRoleNoDownload, ReadOnlyRole, DataEntryOnlyRole, DataEntryMinorRole, DataEntryRole, EditorMinorRole, EditorRole, ManagerRole, OwnerRole]\n for role_class in reversed(roles):\n if self.check_role(role_class, user, obj, new_perm):\n role_class.add(user, obj)\n break\n\n def check_role(self, role_class, user, obj, new_perm=None):\n \"\"\"Test if the user has the role for the object provided :param role_class: :param user: :param obj: :param new_perm: :return:\"\"\"\n new_perm = [] if new_perm is None else new_perm\n perm_list = role_class.class_to_permissions[type(obj)]\n old_perm_set = set(perm_list)\n newly_added_perm = set(new_perm)\n if newly_added_perm.issubset(old_perm_set):\n diff_set = old_perm_set.difference(newly_added_perm)\n if isinstance(user, Team):\n return set(get_perms(user, obj)) == diff_set\n return user.has_perms(list(diff_set), obj)\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "onadata/apps/api/management/commands/reassign_permission.py", "source_repo": "onaio/onadata", "split": "test", "star_events_count": 177} {"blob_id": "cb5195a27b88a7769f8e68b46d386f3721042f4e", "bodies": ["dest_list = []\nwith open('data_layer/data_files/destinations.csv', encoding='utf-8') as file_stream:\n dest_reader = csv.DictReader(file_stream)\n for row in dest_reader:\n dest = Destination(row['country'], row['airport'], row['phone'], row['hours'], row['iata'])\n dest_list.append(dest)\n return dest_list", "new_dest_list = [dest.country, dest.airport, dest.phone, dest.hours, dest.iata]\nwith open('data_layer/data_files/destinations.csv', 'a+', encoding='utf-8', newline='') as file_stream:\n destwriter = csv.writer(file_stream)\n destwriter.writerow(new_dest_list)"], "bodies_text": "<|body_start_0|>\n dest_list = []\n with open('data_layer/data_files/destinations.csv', encoding='utf-8') as file_stream:\n dest_reader = csv.DictReader(file_stream)\n for row in dest_reader:\n dest = Destination(row['country'], row['airport'], row['phone'], row['hours'], row['iata'])\n dest_list.append(dest)\n return dest_list\n<|end_body_0|>\n\n<|body_start_1|>\n new_dest_list = [dest.country, dest.airport, dest.phone, dest.hours, dest.iata]\n with open('data_layer/data_files/destinations.csv', 'a+', encoding='utf-8', newline='') as file_stream:\n destwriter = csv.writer(file_stream)\n destwriter.writerow(new_dest_list)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DestinationData", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DestinationData:\n\n def get_destinations(self):\n \"\"\"Returns all destinations in a list\"\"\"\n <|body_0|>\n\n def create_destination(self, dest):\n \"\"\"Creates a new destination in database\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dest_list = []\n with open('data_layer/data_files/destinations.csv', encoding='utf-8') as file_stream:\n dest_reader = csv.DictReader(file_stream)\n for row in dest_reader:\n dest = Destination(row['country'], row['airport'], row['phone'], row['hours'], row['iata'])\n dest_list.append(dest)\n return dest_list\n<|end_body_0|>\n\n<|body_start_1|>\n new_dest_list = [dest.country, dest.airport, dest.phone, dest.hours, dest.iata]\n with open('data_layer/data_files/destinations.csv', 'a+', encoding='utf-8', newline='') as file_stream:\n destwriter = csv.writer(file_stream)\n destwriter.writerow(new_dest_list)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000379", "length_bytes": 970, "license_type": "no_license", "methods": [{"docstring": "Returns all destinations in a list", "name": "get_destinations", "signature": "def get_destinations(self)"}, {"docstring": "Creates a new destination in database", "name": "create_destination", "signature": "def create_destination(self, dest)"}], "n_methods": 2, "prompt": "Implement the Python class `DestinationData` described below.\n\nClass description:\nImplement the DestinationData class.\n\nMethod signatures and docstrings:\n- def get_destinations(self): Returns all destinations in a list\n- def create_destination(self, dest): Creates a new destination in database", "prompted_full_text": "Implement the Python class `DestinationData` described below.\n\nClass description:\nImplement the DestinationData class.\n\nMethod signatures and docstrings:\n- def get_destinations(self): Returns all destinations in a list\n- def create_destination(self, dest): Creates a new destination in database\n\n<|skeleton|>\nclass DestinationData:\n\n def get_destinations(self):\n \"\"\"Returns all destinations in a list\"\"\"\n <|body_0|>\n\n def create_destination(self, dest):\n \"\"\"Creates a new destination in database\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dest_list = []\n with open('data_layer/data_files/destinations.csv', encoding='utf-8') as file_stream:\n dest_reader = csv.DictReader(file_stream)\n for row in dest_reader:\n dest = Destination(row['country'], row['airport'], row['phone'], row['hours'], row['iata'])\n dest_list.append(dest)\n return dest_list\n<|end_body_0|>\n\n<|body_start_1|>\n new_dest_list = [dest.country, dest.airport, dest.phone, dest.hours, dest.iata]\n with open('data_layer/data_files/destinations.csv', 'a+', encoding='utf-8', newline='') as file_stream:\n destwriter = csv.writer(file_stream)\n destwriter.writerow(new_dest_list)\n<|end_body_1|>\n", "revision_id": "917c6c6c29ac998e58a4f9807f63e660a1b2bf54", "skeleton": "<|skeleton|>\nclass DestinationData:\n\n def get_destinations(self):\n \"\"\"Returns all destinations in a list\"\"\"\n <|body_0|>\n\n def create_destination(self, dest):\n \"\"\"Creates a new destination in database\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DestinationData:\n def get_destinations(self):\n \"\"\"Returns all destinations in a list\"\"\"\n dest_list = []\n with open('data_layer/data_files/destinations.csv', encoding='utf-8') as file_stream:\n dest_reader = csv.DictReader(file_stream)\n for row in dest_reader:\n dest = Destination(row['country'], row['airport'], row['phone'], row['hours'], row['iata'])\n dest_list.append(dest)\n return dest_list\n\n def create_destination(self, dest):\n \"\"\"Creates a new destination in database\"\"\"\n new_dest_list = [dest.country, dest.airport, dest.phone, dest.hours, dest.iata]\n with open('data_layer/data_files/destinations.csv', 'a+', encoding='utf-8', newline='') as file_stream:\n destwriter = csv.writer(file_stream)\n destwriter.writerow(new_dest_list)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/data_layer/destination_data.py", "source_repo": "ThorRafnar/NaNAirCarRental", "split": "test", "star_events_count": 0} {"blob_id": "32b63916f8b136dc8434985b4526cc7738b85792", "bodies": ["super(ProductLabelsQuery, self)._Initialize()\nself.tables = ['metadata_cube']\nself.fields = ['label', 'value', 'SUM(count) AS count']\nself.groups = ['label', 'value']\nself.orders = ['label', 'value']\nself.reply_processors += [self._ProcessReply]\nself.max_rows = 2000", "if start_date:\n start_date_expr = self.GetTimestampFromFilterExpression(start_date)\n self.wheres.append('day_timestamp >= %s' % start_date_expr)\nif end_date:\n end_date_expr = self.GetTimestampFromFilterExpression(end_date, True)\n self.wheres.append('day_timestamp <= %s' % end_date_expr)\nif product_name:\n self.wheres.append('product_name = \"%s\"' % product_name)\nif test:\n self.wheres.append('test = \"%s\"' % test)\nif metric:\n self.wheres.append('metric = \"%s\"' % metric)\nreturn super(ProductLabelsQuery, self).Execute()", "rows = reply['rows']\nlabels = label_manager.LabelManager()\nfor row in rows:\n labels.AddLabel(row['label'], row['value'], row['count'])\nreply['labels'] = labels.labels"], "bodies_text": "<|body_start_0|>\n super(ProductLabelsQuery, self)._Initialize()\n self.tables = ['metadata_cube']\n self.fields = ['label', 'value', 'SUM(count) AS count']\n self.groups = ['label', 'value']\n self.orders = ['label', 'value']\n self.reply_processors += [self._ProcessReply]\n self.max_rows = 2000\n<|end_body_0|>\n\n<|body_start_1|>\n if start_date:\n start_date_expr = self.GetTimestampFromFilterExpression(start_date)\n self.wheres.append('day_timestamp >= %s' % start_date_expr)\n if end_date:\n end_date_expr = self.GetTimestampFromFilterExpression(end_date, True)\n self.wheres.append('day_timestamp <= %s' % end_date_expr)\n if product_name:\n self.wheres.append('product_name = \"%s\"' % product_name)\n if test:\n self.wheres.append('test = \"%s\"' % test)\n if metric:\n self.wheres.append('metric = \"%s\"' % metric)\n return super(ProductLabelsQuery, self).Execute()\n<|end_body_1|>\n\n<|body_start_2|>\n rows = reply['rows']\n labels = label_manager.LabelManager()\n for row in rows:\n labels.AddLabel(row['label'], row['value'], row['count'])\n reply['labels'] = labels.labels\n<|end_body_2|>\n", "class_docstring": "Returns per-product labels and values usage data. After execution, the 'rows' list will contain dictionaries of labels, values and counts. This raw data will be processed by a LabelManager into a queryable structure.", "class_name": "ProductLabelsQuery", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProductLabelsQuery:\n \"\"\"Returns per-product labels and values usage data. After execution, the 'rows' list will contain dictionaries of labels, values and counts. This raw data will be processed by a LabelManager into a queryable structure.\"\"\"\n\n def _Initialize(self):\n \"\"\"Set up the query configuration (fields, tables, etc.).\"\"\"\n <|body_0|>\n\n def Execute(self, start_date=None, end_date=None, product_name=None, test=None, metric=None):\n \"\"\"Retrieves a list of labels and values for a specific product.\"\"\"\n <|body_1|>\n\n def _ProcessReply(self, reply):\n \"\"\"Transforms the raw labels data into a JSON list.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ProductLabelsQuery, self)._Initialize()\n self.tables = ['metadata_cube']\n self.fields = ['label', 'value', 'SUM(count) AS count']\n self.groups = ['label', 'value']\n self.orders = ['label', 'value']\n self.reply_processors += [self._ProcessReply]\n self.max_rows = 2000\n<|end_body_0|>\n\n<|body_start_1|>\n if start_date:\n start_date_expr = self.GetTimestampFromFilterExpression(start_date)\n self.wheres.append('day_timestamp >= %s' % start_date_expr)\n if end_date:\n end_date_expr = self.GetTimestampFromFilterExpression(end_date, True)\n self.wheres.append('day_timestamp <= %s' % end_date_expr)\n if product_name:\n self.wheres.append('product_name = \"%s\"' % product_name)\n if test:\n self.wheres.append('test = \"%s\"' % test)\n if metric:\n self.wheres.append('metric = \"%s\"' % metric)\n return super(ProductLabelsQuery, self).Execute()\n<|end_body_1|>\n\n<|body_start_2|>\n rows = reply['rows']\n labels = label_manager.LabelManager()\n for row in rows:\n labels.AddLabel(row['label'], row['value'], row['count'])\n reply['labels'] = labels.labels\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000380", "length_bytes": 2732, "license_type": "permissive", "methods": [{"docstring": "Set up the query configuration (fields, tables, etc.).", "name": "_Initialize", "signature": "def _Initialize(self)"}, {"docstring": "Retrieves a list of labels and values for a specific product.", "name": "Execute", "signature": "def Execute(self, start_date=None, end_date=None, product_name=None, test=None, metric=None)"}, {"docstring": "Transforms the raw labels data into a JSON list.", "name": "_ProcessReply", "signature": "def _ProcessReply(self, reply)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_015014", "prompt": "Implement the Python class `ProductLabelsQuery` described below.\n\nClass description:\nReturns per-product labels and values usage data. After execution, the 'rows' list will contain dictionaries of labels, values and counts. This raw data will be processed by a LabelManager into a queryable structure.\n\nMethod signatures and docstrings:\n- def _Initialize(self): Set up the query configuration (fields, tables, etc.).\n- def Execute(self, start_date=None, end_date=None, product_name=None, test=None, metric=None): Retrieves a list of labels and values for a specific product.\n- def _ProcessReply(self, reply): Transforms the raw labels data into a JSON list.", "prompted_full_text": "Implement the Python class `ProductLabelsQuery` described below.\n\nClass description:\nReturns per-product labels and values usage data. After execution, the 'rows' list will contain dictionaries of labels, values and counts. This raw data will be processed by a LabelManager into a queryable structure.\n\nMethod signatures and docstrings:\n- def _Initialize(self): Set up the query configuration (fields, tables, etc.).\n- def Execute(self, start_date=None, end_date=None, product_name=None, test=None, metric=None): Retrieves a list of labels and values for a specific product.\n- def _ProcessReply(self, reply): Transforms the raw labels data into a JSON list.\n\n<|skeleton|>\nclass ProductLabelsQuery:\n \"\"\"Returns per-product labels and values usage data. After execution, the 'rows' list will contain dictionaries of labels, values and counts. This raw data will be processed by a LabelManager into a queryable structure.\"\"\"\n\n def _Initialize(self):\n \"\"\"Set up the query configuration (fields, tables, etc.).\"\"\"\n <|body_0|>\n\n def Execute(self, start_date=None, end_date=None, product_name=None, test=None, metric=None):\n \"\"\"Retrieves a list of labels and values for a specific product.\"\"\"\n <|body_1|>\n\n def _ProcessReply(self, reply):\n \"\"\"Transforms the raw labels data into a JSON list.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ProductLabelsQuery, self)._Initialize()\n self.tables = ['metadata_cube']\n self.fields = ['label', 'value', 'SUM(count) AS count']\n self.groups = ['label', 'value']\n self.orders = ['label', 'value']\n self.reply_processors += [self._ProcessReply]\n self.max_rows = 2000\n<|end_body_0|>\n\n<|body_start_1|>\n if start_date:\n start_date_expr = self.GetTimestampFromFilterExpression(start_date)\n self.wheres.append('day_timestamp >= %s' % start_date_expr)\n if end_date:\n end_date_expr = self.GetTimestampFromFilterExpression(end_date, True)\n self.wheres.append('day_timestamp <= %s' % end_date_expr)\n if product_name:\n self.wheres.append('product_name = \"%s\"' % product_name)\n if test:\n self.wheres.append('test = \"%s\"' % test)\n if metric:\n self.wheres.append('metric = \"%s\"' % metric)\n return super(ProductLabelsQuery, self).Execute()\n<|end_body_1|>\n\n<|body_start_2|>\n rows = reply['rows']\n labels = label_manager.LabelManager()\n for row in rows:\n labels.AddLabel(row['label'], row['value'], row['count'])\n reply['labels'] = labels.labels\n<|end_body_2|>\n", "revision_id": "9efa61015d50c25f6d753f0212ad3bf16876d496", "skeleton": "<|skeleton|>\nclass ProductLabelsQuery:\n \"\"\"Returns per-product labels and values usage data. After execution, the 'rows' list will contain dictionaries of labels, values and counts. This raw data will be processed by a LabelManager into a queryable structure.\"\"\"\n\n def _Initialize(self):\n \"\"\"Set up the query configuration (fields, tables, etc.).\"\"\"\n <|body_0|>\n\n def Execute(self, start_date=None, end_date=None, product_name=None, test=None, metric=None):\n \"\"\"Retrieves a list of labels and values for a specific product.\"\"\"\n <|body_1|>\n\n def _ProcessReply(self, reply):\n \"\"\"Transforms the raw labels data into a JSON list.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProductLabelsQuery:\n \"\"\"Returns per-product labels and values usage data. After execution, the 'rows' list will contain dictionaries of labels, values and counts. This raw data will be processed by a LabelManager into a queryable structure.\"\"\"\n\n def _Initialize(self):\n \"\"\"Set up the query configuration (fields, tables, etc.).\"\"\"\n super(ProductLabelsQuery, self)._Initialize()\n self.tables = ['metadata_cube']\n self.fields = ['label', 'value', 'SUM(count) AS count']\n self.groups = ['label', 'value']\n self.orders = ['label', 'value']\n self.reply_processors += [self._ProcessReply]\n self.max_rows = 2000\n\n def Execute(self, start_date=None, end_date=None, product_name=None, test=None, metric=None):\n \"\"\"Retrieves a list of labels and values for a specific product.\"\"\"\n if start_date:\n start_date_expr = self.GetTimestampFromFilterExpression(start_date)\n self.wheres.append('day_timestamp >= %s' % start_date_expr)\n if end_date:\n end_date_expr = self.GetTimestampFromFilterExpression(end_date, True)\n self.wheres.append('day_timestamp <= %s' % end_date_expr)\n if product_name:\n self.wheres.append('product_name = \"%s\"' % product_name)\n if test:\n self.wheres.append('test = \"%s\"' % test)\n if metric:\n self.wheres.append('metric = \"%s\"' % metric)\n return super(ProductLabelsQuery, self).Execute()\n\n def _ProcessReply(self, reply):\n \"\"\"Transforms the raw labels data into a JSON list.\"\"\"\n rows = reply['rows']\n labels = label_manager.LabelManager()\n for row in rows:\n labels.AddLabel(row['label'], row['value'], row['count'])\n reply['labels'] = labels.labels\n", "source": "the_stack_v2_python_sparse", "source_path": "server/perfkit/explorer/samples_mart/product_labels.py", "source_repo": "GoogleCloudPlatform/PerfKitExplorer", "split": "test", "star_events_count": 292} {"blob_id": "912b6eb073c8bb0b49a0df46e7999a3f8716df9f", "bodies": ["eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nd_space = [100, 1000, 10000]\nfor eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n self.assertLessEqual(0, gamma)\n self.assertLessEqual(gamma, 1)\n if gamma <= np.sqrt(np.pi / (2 * (d - 1))) * (np.exp(eps) - 1) / (np.exp(eps) + 1):\n flag_16a = True\n if eps >= 0.5 * np.log(d) + np.log(6) + np.log(gamma) - (d - 1) * np.log(1 - gamma ** 2) / 2 and gamma >= np.sqrt(2 / d):\n flag_16b = True\n self.assertTrue(flag_16a or flag_16b)", "eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nd_space = [100, 1000, 10000]\np_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\nfor eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n c1, c2 = privunit.get_privunit_densities(d, gamma, p)\n self.assertLessEqual(c2, c1)", "eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\nd_space = [100, 1000, 10000]\np_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\nfor eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n m = privunit.getm(d, gamma, p)\n self.assertLessEqual(m, 1)", "eps_space = [4, 5]\nd_space = [100]\nn = 50000\nbudget = 0.5\nfor eps in eps_space:\n for d in d_space:\n x = np.random.normal(0, 1, (d, 1))\n x = np.divide(x, np.linalg.norm(x, axis=0).reshape(1, -1))\n x = np.repeat(x, n, axis=1)\n x_privunit, m = privunit.apply_privunit(x, eps, budget)\n x_avg_privunit = np.mean(x_privunit, axis=1).reshape(-1, 1)\n x_mse = np.linalg.norm(np.mean(x, axis=1).reshape(-1, 1) - x_avg_privunit) ** 2\n self.assertLessEqual(x_mse, 0.01)\n x_norm = np.linalg.norm(x_privunit, axis=0) - np.ones(n) / m\n self.assertLessEqual(np.max(x_norm), 1e-06)"], "bodies_text": "<|body_start_0|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n self.assertLessEqual(0, gamma)\n self.assertLessEqual(gamma, 1)\n if gamma <= np.sqrt(np.pi / (2 * (d - 1))) * (np.exp(eps) - 1) / (np.exp(eps) + 1):\n flag_16a = True\n if eps >= 0.5 * np.log(d) + np.log(6) + np.log(gamma) - (d - 1) * np.log(1 - gamma ** 2) / 2 and gamma >= np.sqrt(2 / d):\n flag_16b = True\n self.assertTrue(flag_16a or flag_16b)\n<|end_body_0|>\n\n<|body_start_1|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n c1, c2 = privunit.get_privunit_densities(d, gamma, p)\n self.assertLessEqual(c2, c1)\n<|end_body_1|>\n\n<|body_start_2|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n m = privunit.getm(d, gamma, p)\n self.assertLessEqual(m, 1)\n<|end_body_2|>\n\n<|body_start_3|>\n eps_space = [4, 5]\n d_space = [100]\n n = 50000\n budget = 0.5\n for eps in eps_space:\n for d in d_space:\n x = np.random.normal(0, 1, (d, 1))\n x = np.divide(x, np.linalg.norm(x, axis=0).reshape(1, -1))\n x = np.repeat(x, n, axis=1)\n x_privunit, m = privunit.apply_privunit(x, eps, budget)\n x_avg_privunit = np.mean(x_privunit, axis=1).reshape(-1, 1)\n x_mse = np.linalg.norm(np.mean(x, axis=1).reshape(-1, 1) - x_avg_privunit) ** 2\n self.assertLessEqual(x_mse, 0.01)\n x_norm = np.linalg.norm(x_privunit, axis=0) - np.ones(n) / m\n self.assertLessEqual(np.max(x_norm), 1e-06)\n<|end_body_3|>\n", "class_docstring": "", "class_name": "PrivunitTest", "detected_licenses": ["BSD-3-Clause", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PrivunitTest:\n\n def test_gamma_is_in_range(self):\n \"\"\"Test whether gamma adheres to (16a) or (16b) in the original paper.\"\"\"\n <|body_0|>\n\n def test_c2_is_less_equal_c1(self):\n \"\"\"Tests if c2 is less than or equal to c1.\"\"\"\n <|body_1|>\n\n def test_m_is_less_equal_one(self):\n \"\"\"Tests whether the inverse norm m is less than or equal to 1.\"\"\"\n <|body_2|>\n\n def test_bias_and_norm_privunit(self):\n \"\"\"Checks whether the privatized x is unbiased and has the right norm.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n self.assertLessEqual(0, gamma)\n self.assertLessEqual(gamma, 1)\n if gamma <= np.sqrt(np.pi / (2 * (d - 1))) * (np.exp(eps) - 1) / (np.exp(eps) + 1):\n flag_16a = True\n if eps >= 0.5 * np.log(d) + np.log(6) + np.log(gamma) - (d - 1) * np.log(1 - gamma ** 2) / 2 and gamma >= np.sqrt(2 / d):\n flag_16b = True\n self.assertTrue(flag_16a or flag_16b)\n<|end_body_0|>\n\n<|body_start_1|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n c1, c2 = privunit.get_privunit_densities(d, gamma, p)\n self.assertLessEqual(c2, c1)\n<|end_body_1|>\n\n<|body_start_2|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n m = privunit.getm(d, gamma, p)\n self.assertLessEqual(m, 1)\n<|end_body_2|>\n\n<|body_start_3|>\n eps_space = [4, 5]\n d_space = [100]\n n = 50000\n budget = 0.5\n for eps in eps_space:\n for d in d_space:\n x = np.random.normal(0, 1, (d, 1))\n x = np.divide(x, np.linalg.norm(x, axis=0).reshape(1, -1))\n x = np.repeat(x, n, axis=1)\n x_privunit, m = privunit.apply_privunit(x, eps, budget)\n x_avg_privunit = np.mean(x_privunit, axis=1).reshape(-1, 1)\n x_mse = np.linalg.norm(np.mean(x, axis=1).reshape(-1, 1) - x_avg_privunit) ** 2\n self.assertLessEqual(x_mse, 0.01)\n x_norm = np.linalg.norm(x_privunit, axis=0) - np.ones(n) / m\n self.assertLessEqual(np.max(x_norm), 1e-06)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000381", "length_bytes": 3348, "license_type": "permissive", "methods": [{"docstring": "Test whether gamma adheres to (16a) or (16b) in the original paper.", "name": "test_gamma_is_in_range", "signature": "def test_gamma_is_in_range(self)"}, {"docstring": "Tests if c2 is less than or equal to c1.", "name": "test_c2_is_less_equal_c1", "signature": "def test_c2_is_less_equal_c1(self)"}, {"docstring": "Tests whether the inverse norm m is less than or equal to 1.", "name": "test_m_is_less_equal_one", "signature": "def test_m_is_less_equal_one(self)"}, {"docstring": "Checks whether the privatized x is unbiased and has the right norm.", "name": "test_bias_and_norm_privunit", "signature": "def test_bias_and_norm_privunit(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_001504", "prompt": "Implement the Python class `PrivunitTest` described below.\n\nClass description:\nImplement the PrivunitTest class.\n\nMethod signatures and docstrings:\n- def test_gamma_is_in_range(self): Test whether gamma adheres to (16a) or (16b) in the original paper.\n- def test_c2_is_less_equal_c1(self): Tests if c2 is less than or equal to c1.\n- def test_m_is_less_equal_one(self): Tests whether the inverse norm m is less than or equal to 1.\n- def test_bias_and_norm_privunit(self): Checks whether the privatized x is unbiased and has the right norm.", "prompted_full_text": "Implement the Python class `PrivunitTest` described below.\n\nClass description:\nImplement the PrivunitTest class.\n\nMethod signatures and docstrings:\n- def test_gamma_is_in_range(self): Test whether gamma adheres to (16a) or (16b) in the original paper.\n- def test_c2_is_less_equal_c1(self): Tests if c2 is less than or equal to c1.\n- def test_m_is_less_equal_one(self): Tests whether the inverse norm m is less than or equal to 1.\n- def test_bias_and_norm_privunit(self): Checks whether the privatized x is unbiased and has the right norm.\n\n<|skeleton|>\nclass PrivunitTest:\n\n def test_gamma_is_in_range(self):\n \"\"\"Test whether gamma adheres to (16a) or (16b) in the original paper.\"\"\"\n <|body_0|>\n\n def test_c2_is_less_equal_c1(self):\n \"\"\"Tests if c2 is less than or equal to c1.\"\"\"\n <|body_1|>\n\n def test_m_is_less_equal_one(self):\n \"\"\"Tests whether the inverse norm m is less than or equal to 1.\"\"\"\n <|body_2|>\n\n def test_bias_and_norm_privunit(self):\n \"\"\"Checks whether the privatized x is unbiased and has the right norm.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n self.assertLessEqual(0, gamma)\n self.assertLessEqual(gamma, 1)\n if gamma <= np.sqrt(np.pi / (2 * (d - 1))) * (np.exp(eps) - 1) / (np.exp(eps) + 1):\n flag_16a = True\n if eps >= 0.5 * np.log(d) + np.log(6) + np.log(gamma) - (d - 1) * np.log(1 - gamma ** 2) / 2 and gamma >= np.sqrt(2 / d):\n flag_16b = True\n self.assertTrue(flag_16a or flag_16b)\n<|end_body_0|>\n\n<|body_start_1|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n c1, c2 = privunit.get_privunit_densities(d, gamma, p)\n self.assertLessEqual(c2, c1)\n<|end_body_1|>\n\n<|body_start_2|>\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n m = privunit.getm(d, gamma, p)\n self.assertLessEqual(m, 1)\n<|end_body_2|>\n\n<|body_start_3|>\n eps_space = [4, 5]\n d_space = [100]\n n = 50000\n budget = 0.5\n for eps in eps_space:\n for d in d_space:\n x = np.random.normal(0, 1, (d, 1))\n x = np.divide(x, np.linalg.norm(x, axis=0).reshape(1, -1))\n x = np.repeat(x, n, axis=1)\n x_privunit, m = privunit.apply_privunit(x, eps, budget)\n x_avg_privunit = np.mean(x_privunit, axis=1).reshape(-1, 1)\n x_mse = np.linalg.norm(np.mean(x, axis=1).reshape(-1, 1) - x_avg_privunit) ** 2\n self.assertLessEqual(x_mse, 0.01)\n x_norm = np.linalg.norm(x_privunit, axis=0) - np.ones(n) / m\n self.assertLessEqual(np.max(x_norm), 1e-06)\n<|end_body_3|>\n", "revision_id": "329e60fa56b87f691303638ceb9dfa1fc5083953", "skeleton": "<|skeleton|>\nclass PrivunitTest:\n\n def test_gamma_is_in_range(self):\n \"\"\"Test whether gamma adheres to (16a) or (16b) in the original paper.\"\"\"\n <|body_0|>\n\n def test_c2_is_less_equal_c1(self):\n \"\"\"Tests if c2 is less than or equal to c1.\"\"\"\n <|body_1|>\n\n def test_m_is_less_equal_one(self):\n \"\"\"Tests whether the inverse norm m is less than or equal to 1.\"\"\"\n <|body_2|>\n\n def test_bias_and_norm_privunit(self):\n \"\"\"Checks whether the privatized x is unbiased and has the right norm.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PrivunitTest:\n def test_gamma_is_in_range(self):\n \"\"\"Test whether gamma adheres to (16a) or (16b) in the original paper.\"\"\"\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n self.assertLessEqual(0, gamma)\n self.assertLessEqual(gamma, 1)\n if gamma <= np.sqrt(np.pi / (2 * (d - 1))) * (np.exp(eps) - 1) / (np.exp(eps) + 1):\n flag_16a = True\n if eps >= 0.5 * np.log(d) + np.log(6) + np.log(gamma) - (d - 1) * np.log(1 - gamma ** 2) / 2 and gamma >= np.sqrt(2 / d):\n flag_16b = True\n self.assertTrue(flag_16a or flag_16b)\n\n def test_c2_is_less_equal_c1(self):\n \"\"\"Tests if c2 is less than or equal to c1.\"\"\"\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n c1, c2 = privunit.get_privunit_densities(d, gamma, p)\n self.assertLessEqual(c2, c1)\n\n def test_m_is_less_equal_one(self):\n \"\"\"Tests whether the inverse norm m is less than or equal to 1.\"\"\"\n eps_space = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n d_space = [100, 1000, 10000]\n p_space = [0.5, 0.6, 0.7, 0.8, 0.9, 1.0]\n for eps in eps_space:\n for d in d_space:\n gamma, _ = privunit.find_best_gamma(d, eps)\n for p in p_space:\n m = privunit.getm(d, gamma, p)\n self.assertLessEqual(m, 1)\n\n def test_bias_and_norm_privunit(self):\n \"\"\"Checks whether the privatized x is unbiased and has the right norm.\"\"\"\n eps_space = [4, 5]\n d_space = [100]\n n = 50000\n budget = 0.5\n for eps in eps_space:\n for d in d_space:\n x = np.random.normal(0, 1, (d, 1))\n x = np.divide(x, np.linalg.norm(x, axis=0).reshape(1, -1))\n x = np.repeat(x, n, axis=1)\n x_privunit, m = privunit.apply_privunit(x, eps, budget)\n x_avg_privunit = np.mean(x_privunit, axis=1).reshape(-1, 1)\n x_mse = np.linalg.norm(np.mean(x, axis=1).reshape(-1, 1) - x_avg_privunit) ** 2\n self.assertLessEqual(x_mse, 0.01)\n x_norm = np.linalg.norm(x_privunit, axis=0) - np.ones(n) / m\n self.assertLessEqual(np.max(x_norm), 1e-06)\n", "source": "the_stack_v2_python_sparse", "source_path": "rcc_dp/mean_estimation/privunit_test.py", "source_repo": "google-research/federated", "split": "test", "star_events_count": 595} {"blob_id": "ec8da4473c4d2474032da25dfdb0933e226ea288", "bodies": ["result = []\nfor idx1, init_val in enumerate(nums):\n updated_arr = nums[:idx1] + nums[idx1 + 1:]\n for indx2, val in enumerate(updated_arr):\n if val + init_val == target:\n result.extend([idx1, indx2 + 1])\n return result", "hash_t = {}\nfor idx1, init_val in enumerate(nums):\n hash_t[idx1] = init_val\nfor idx, val in enumerate(nums):\n balance = target - hash_t[idx]\n if balance in hash_t.values() and hash_t.keys()[hash_t.values().index(balance)] != idx:\n return [idx, hash_t.keys()[hash_t.values().index(balance)]]", "hash_t = {}\nfor idx1, init_val in enumerate(nums):\n balance = target - init_val\n if balance in hash_t:\n return [hash_t[balance], idx1]\n hash_t[init_val] = idx1"], "bodies_text": "<|body_start_0|>\n result = []\n for idx1, init_val in enumerate(nums):\n updated_arr = nums[:idx1] + nums[idx1 + 1:]\n for indx2, val in enumerate(updated_arr):\n if val + init_val == target:\n result.extend([idx1, indx2 + 1])\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n hash_t[idx1] = init_val\n for idx, val in enumerate(nums):\n balance = target - hash_t[idx]\n if balance in hash_t.values() and hash_t.keys()[hash_t.values().index(balance)] != idx:\n return [idx, hash_t.keys()[hash_t.values().index(balance)]]\n<|end_body_1|>\n\n<|body_start_2|>\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n balance = target - init_val\n if balance in hash_t:\n return [hash_t[balance], idx1]\n hash_t[init_val] = idx1\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def twoSum1(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n def twoSum2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = []\n for idx1, init_val in enumerate(nums):\n updated_arr = nums[:idx1] + nums[idx1 + 1:]\n for indx2, val in enumerate(updated_arr):\n if val + init_val == target:\n result.extend([idx1, indx2 + 1])\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n hash_t[idx1] = init_val\n for idx, val in enumerate(nums):\n balance = target - hash_t[idx]\n if balance in hash_t.values() and hash_t.keys()[hash_t.values().index(balance)] != idx:\n return [idx, hash_t.keys()[hash_t.values().index(balance)]]\n<|end_body_1|>\n\n<|body_start_2|>\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n balance = target - init_val\n if balance in hash_t:\n return [hash_t[balance], idx1]\n hash_t[init_val] = idx1\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000382", "length_bytes": 2539, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "twoSum", "signature": "def twoSum(self, nums, target)"}, {"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "twoSum1", "signature": "def twoSum1(self, nums, target)"}, {"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "twoSum2", "signature": "def twoSum2(self, nums, target)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def twoSum(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def twoSum1(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def twoSum2(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def twoSum(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def twoSum1(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def twoSum2(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n\n<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def twoSum1(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n def twoSum2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n result = []\n for idx1, init_val in enumerate(nums):\n updated_arr = nums[:idx1] + nums[idx1 + 1:]\n for indx2, val in enumerate(updated_arr):\n if val + init_val == target:\n result.extend([idx1, indx2 + 1])\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n hash_t[idx1] = init_val\n for idx, val in enumerate(nums):\n balance = target - hash_t[idx]\n if balance in hash_t.values() and hash_t.keys()[hash_t.values().index(balance)] != idx:\n return [idx, hash_t.keys()[hash_t.values().index(balance)]]\n<|end_body_1|>\n\n<|body_start_2|>\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n balance = target - init_val\n if balance in hash_t:\n return [hash_t[balance], idx1]\n hash_t[init_val] = idx1\n<|end_body_2|>\n", "revision_id": "da4de71d349fec422c3f7595ecad72d7f0e74a04", "skeleton": "<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def twoSum1(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_1|>\n\n def twoSum2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def twoSum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n result = []\n for idx1, init_val in enumerate(nums):\n updated_arr = nums[:idx1] + nums[idx1 + 1:]\n for indx2, val in enumerate(updated_arr):\n if val + init_val == target:\n result.extend([idx1, indx2 + 1])\n return result\n\n def twoSum1(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n hash_t[idx1] = init_val\n for idx, val in enumerate(nums):\n balance = target - hash_t[idx]\n if balance in hash_t.values() and hash_t.keys()[hash_t.values().index(balance)] != idx:\n return [idx, hash_t.keys()[hash_t.values().index(balance)]]\n\n def twoSum2(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n hash_t = {}\n for idx1, init_val in enumerate(nums):\n balance = target - init_val\n if balance in hash_t:\n return [hash_t[balance], idx1]\n hash_t[init_val] = idx1\n", "source": "the_stack_v2_python_sparse", "source_path": "problems/leet_code_1_two_sum.py", "source_repo": "33percent/python-DSA", "split": "test", "star_events_count": 0} {"blob_id": "27438849b800c9fa65cba43b03818eb13c7d7d86", "bodies": ["self._vectorizer = TfidfVectorizer(ngram_range=ngram_range, min_df=min_df, max_df=max_df, analyzer=analyzer.value)\nself._documents = documents\nself._index = self._vectorizer.fit_transform(map(text_getter, self._documents))", "query_vector = self._vectorizer.transform([query])\nscores = zip(self._documents, self._index.dot(query_vector.T).T.toarray()[0])\nfiltered_scores = (SearchResult(doc, score) for doc, score in scores if score > retrieval_threshold)\nreturn heapq.nlargest(num_results, filtered_scores, key=lambda p: p.score)"], "bodies_text": "<|body_start_0|>\n self._vectorizer = TfidfVectorizer(ngram_range=ngram_range, min_df=min_df, max_df=max_df, analyzer=analyzer.value)\n self._documents = documents\n self._index = self._vectorizer.fit_transform(map(text_getter, self._documents))\n<|end_body_0|>\n\n<|body_start_1|>\n query_vector = self._vectorizer.transform([query])\n scores = zip(self._documents, self._index.dot(query_vector.T).T.toarray()[0])\n filtered_scores = (SearchResult(doc, score) for doc, score in scores if score > retrieval_threshold)\n return heapq.nlargest(num_results, filtered_scores, key=lambda p: p.score)\n<|end_body_1|>\n", "class_docstring": "A simple text index from a corpus of text using tf-idf similarity.", "class_name": "TextIndex", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-generic-cla"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TextIndex:\n \"\"\"A simple text index from a corpus of text using tf-idf similarity.\"\"\"\n\n def __init__(self, documents, text_getter, ngram_range=(1, 2), analyzer=Analyzer.WORD, min_df=1, max_df=0.9):\n \"\"\"Init parameters for TextIndex. Args: documents: Corpus of documents to be indexed and retrieved. text_getter: Function to extract text from documents. ngram_range: tuple (min_n, max_n), default=(1, 2) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. analyzer: Analyzer, {‘word’, ‘char’, ‘char_wb’}. Whether the feature should be made of word or character n-grams. Option ‘char_wb’ creates character n-grams only from text inside word boundaries; n-grams at\"\"\"\n <|body_0|>\n\n def search(self, query, retrieval_threshold=0.0, num_results=5):\n \"\"\"Retrieve matching text in the corpus. Args: query: Text used to search for candidates in the corpus.s retrieval_threshold: Filter results above this threshold. num_results: Number of results to return. Returns: Tuple of text and float score. Top `num_results` elements in the corpus.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._vectorizer = TfidfVectorizer(ngram_range=ngram_range, min_df=min_df, max_df=max_df, analyzer=analyzer.value)\n self._documents = documents\n self._index = self._vectorizer.fit_transform(map(text_getter, self._documents))\n<|end_body_0|>\n\n<|body_start_1|>\n query_vector = self._vectorizer.transform([query])\n scores = zip(self._documents, self._index.dot(query_vector.T).T.toarray()[0])\n filtered_scores = (SearchResult(doc, score) for doc, score in scores if score > retrieval_threshold)\n return heapq.nlargest(num_results, filtered_scores, key=lambda p: p.score)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000383", "length_bytes": 4223, "license_type": "permissive", "methods": [{"docstring": "Init parameters for TextIndex. Args: documents: Corpus of documents to be indexed and retrieved. text_getter: Function to extract text from documents. ngram_range: tuple (min_n, max_n), default=(1, 2) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. analyzer: Analyzer, {‘word’, ‘char’, ‘char_wb’}. Whether the feature should be made of word or character n-grams. Option ‘char_wb’ creates character n-grams only from text inside word boundaries; n-grams at", "name": "__init__", "signature": "def __init__(self, documents, text_getter, ngram_range=(1, 2), analyzer=Analyzer.WORD, min_df=1, max_df=0.9)"}, {"docstring": "Retrieve matching text in the corpus. Args: query: Text used to search for candidates in the corpus.s retrieval_threshold: Filter results above this threshold. num_results: Number of results to return. Returns: Tuple of text and float score. Top `num_results` elements in the corpus.", "name": "search", "signature": "def search(self, query, retrieval_threshold=0.0, num_results=5)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_032953", "prompt": "Implement the Python class `TextIndex` described below.\n\nClass description:\nA simple text index from a corpus of text using tf-idf similarity.\n\nMethod signatures and docstrings:\n- def __init__(self, documents, text_getter, ngram_range=(1, 2), analyzer=Analyzer.WORD, min_df=1, max_df=0.9): Init parameters for TextIndex. Args: documents: Corpus of documents to be indexed and retrieved. text_getter: Function to extract text from documents. ngram_range: tuple (min_n, max_n), default=(1, 2) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. analyzer: Analyzer, {‘word’, ‘char’, ‘char_wb’}. Whether the feature should be made of word or character n-grams. Option ‘char_wb’ creates character n-grams only from text inside word boundaries; n-grams at\n- def search(self, query, retrieval_threshold=0.0, num_results=5): Retrieve matching text in the corpus. Args: query: Text used to search for candidates in the corpus.s retrieval_threshold: Filter results above this threshold. num_results: Number of results to return. Returns: Tuple of text and float score. Top `num_results` elements in the corpus.", "prompted_full_text": "Implement the Python class `TextIndex` described below.\n\nClass description:\nA simple text index from a corpus of text using tf-idf similarity.\n\nMethod signatures and docstrings:\n- def __init__(self, documents, text_getter, ngram_range=(1, 2), analyzer=Analyzer.WORD, min_df=1, max_df=0.9): Init parameters for TextIndex. Args: documents: Corpus of documents to be indexed and retrieved. text_getter: Function to extract text from documents. ngram_range: tuple (min_n, max_n), default=(1, 2) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. analyzer: Analyzer, {‘word’, ‘char’, ‘char_wb’}. Whether the feature should be made of word or character n-grams. Option ‘char_wb’ creates character n-grams only from text inside word boundaries; n-grams at\n- def search(self, query, retrieval_threshold=0.0, num_results=5): Retrieve matching text in the corpus. Args: query: Text used to search for candidates in the corpus.s retrieval_threshold: Filter results above this threshold. num_results: Number of results to return. Returns: Tuple of text and float score. Top `num_results` elements in the corpus.\n\n<|skeleton|>\nclass TextIndex:\n \"\"\"A simple text index from a corpus of text using tf-idf similarity.\"\"\"\n\n def __init__(self, documents, text_getter, ngram_range=(1, 2), analyzer=Analyzer.WORD, min_df=1, max_df=0.9):\n \"\"\"Init parameters for TextIndex. Args: documents: Corpus of documents to be indexed and retrieved. text_getter: Function to extract text from documents. ngram_range: tuple (min_n, max_n), default=(1, 2) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. analyzer: Analyzer, {‘word’, ‘char’, ‘char_wb’}. Whether the feature should be made of word or character n-grams. Option ‘char_wb’ creates character n-grams only from text inside word boundaries; n-grams at\"\"\"\n <|body_0|>\n\n def search(self, query, retrieval_threshold=0.0, num_results=5):\n \"\"\"Retrieve matching text in the corpus. Args: query: Text used to search for candidates in the corpus.s retrieval_threshold: Filter results above this threshold. num_results: Number of results to return. Returns: Tuple of text and float score. Top `num_results` elements in the corpus.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._vectorizer = TfidfVectorizer(ngram_range=ngram_range, min_df=min_df, max_df=max_df, analyzer=analyzer.value)\n self._documents = documents\n self._index = self._vectorizer.fit_transform(map(text_getter, self._documents))\n<|end_body_0|>\n\n<|body_start_1|>\n query_vector = self._vectorizer.transform([query])\n scores = zip(self._documents, self._index.dot(query_vector.T).T.toarray()[0])\n filtered_scores = (SearchResult(doc, score) for doc, score in scores if score > retrieval_threshold)\n return heapq.nlargest(num_results, filtered_scores, key=lambda p: p.score)\n<|end_body_1|>\n", "revision_id": "569a3c31451d941165bd10783f73f494406b3906", "skeleton": "<|skeleton|>\nclass TextIndex:\n \"\"\"A simple text index from a corpus of text using tf-idf similarity.\"\"\"\n\n def __init__(self, documents, text_getter, ngram_range=(1, 2), analyzer=Analyzer.WORD, min_df=1, max_df=0.9):\n \"\"\"Init parameters for TextIndex. Args: documents: Corpus of documents to be indexed and retrieved. text_getter: Function to extract text from documents. ngram_range: tuple (min_n, max_n), default=(1, 2) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. analyzer: Analyzer, {‘word’, ‘char’, ‘char_wb’}. Whether the feature should be made of word or character n-grams. Option ‘char_wb’ creates character n-grams only from text inside word boundaries; n-grams at\"\"\"\n <|body_0|>\n\n def search(self, query, retrieval_threshold=0.0, num_results=5):\n \"\"\"Retrieve matching text in the corpus. Args: query: Text used to search for candidates in the corpus.s retrieval_threshold: Filter results above this threshold. num_results: Number of results to return. Returns: Tuple of text and float score. Top `num_results` elements in the corpus.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TextIndex:\n \"\"\"A simple text index from a corpus of text using tf-idf similarity.\"\"\"\n\n def __init__(self, documents, text_getter, ngram_range=(1, 2), analyzer=Analyzer.WORD, min_df=1, max_df=0.9):\n \"\"\"Init parameters for TextIndex. Args: documents: Corpus of documents to be indexed and retrieved. text_getter: Function to extract text from documents. ngram_range: tuple (min_n, max_n), default=(1, 2) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. analyzer: Analyzer, {‘word’, ‘char’, ‘char_wb’}. Whether the feature should be made of word or character n-grams. Option ‘char_wb’ creates character n-grams only from text inside word boundaries; n-grams at\"\"\"\n self._vectorizer = TfidfVectorizer(ngram_range=ngram_range, min_df=min_df, max_df=max_df, analyzer=analyzer.value)\n self._documents = documents\n self._index = self._vectorizer.fit_transform(map(text_getter, self._documents))\n\n def search(self, query, retrieval_threshold=0.0, num_results=5):\n \"\"\"Retrieve matching text in the corpus. Args: query: Text used to search for candidates in the corpus.s retrieval_threshold: Filter results above this threshold. num_results: Number of results to return. Returns: Tuple of text and float score. Top `num_results` elements in the corpus.\"\"\"\n query_vector = self._vectorizer.transform([query])\n scores = zip(self._documents, self._index.dot(query_vector.T).T.toarray()[0])\n filtered_scores = (SearchResult(doc, score) for doc, score in scores if score > retrieval_threshold)\n return heapq.nlargest(num_results, filtered_scores, key=lambda p: p.score)\n", "source": "the_stack_v2_python_sparse", "source_path": "tapas/utils/text_index.py", "source_repo": "google-research/tapas", "split": "test", "star_events_count": 1043} {"blob_id": "377c23e81f7798e0e72388efea15e7a09052751b", "bodies": ["if root in (None, p, q):\n return root\nleft = self.lowestCommonAncestor(root.left, p, q)\nright = self.lowestCommonAncestor(root.right, p, q)\nreturn root if left and right else left or right", "ans = root\nwhile ans:\n if min(p.val, q.val) > ans.val and ans.right:\n ans = ans.right\n continue\n elif max(p.val, q.val) < ans.val and ans.left:\n ans = ans.left\n continue\n return ans", "def path(root, p):\n s = []\n now = root\n while now and now.val != p.val:\n s.append(now)\n now = now.left if now.val > p.val else now.right\n s.append(now)\n return s\npathP, pathQ = (path(root, p), path(root, q))\nans, i = (None, 0)\nminlen = min(len(pathP), len(pathQ))\nwhile i < minlen and pathP[i] == pathQ[i]:\n ans = pathP[i]\n i += 1\nreturn ans"], "bodies_text": "<|body_start_0|>\n if root in (None, p, q):\n return root\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n return root if left and right else left or right\n<|end_body_0|>\n\n<|body_start_1|>\n ans = root\n while ans:\n if min(p.val, q.val) > ans.val and ans.right:\n ans = ans.right\n continue\n elif max(p.val, q.val) < ans.val and ans.left:\n ans = ans.left\n continue\n return ans\n<|end_body_1|>\n\n<|body_start_2|>\n def path(root, p):\n s = []\n now = root\n while now and now.val != p.val:\n s.append(now)\n now = now.left if now.val > p.val else now.right\n s.append(now)\n return s\n pathP, pathQ = (path(root, p), path(root, q))\n ans, i = (None, 0)\n minlen = min(len(pathP), len(pathQ))\n while i < minlen and pathP[i] == pathQ[i]:\n ans = pathP[i]\n i += 1\n return ans\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def lowestCommonAncestor_recursively(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def lowestCommonAncestor_BSTway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n def lowestCommonAncestor_pathway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root in (None, p, q):\n return root\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n return root if left and right else left or right\n<|end_body_0|>\n\n<|body_start_1|>\n ans = root\n while ans:\n if min(p.val, q.val) > ans.val and ans.right:\n ans = ans.right\n continue\n elif max(p.val, q.val) < ans.val and ans.left:\n ans = ans.left\n continue\n return ans\n<|end_body_1|>\n\n<|body_start_2|>\n def path(root, p):\n s = []\n now = root\n while now and now.val != p.val:\n s.append(now)\n now = now.left if now.val > p.val else now.right\n s.append(now)\n return s\n pathP, pathQ = (path(root, p), path(root, q))\n ans, i = (None, 0)\n minlen = min(len(pathP), len(pathQ))\n while i < minlen and pathP[i] == pathQ[i]:\n ans = pathP[i]\n i += 1\n return ans\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000384", "length_bytes": 2167, "license_type": "permissive", "methods": [{"docstring": ":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode", "name": "lowestCommonAncestor_recursively", "signature": "def lowestCommonAncestor_recursively(self, root, p, q)"}, {"docstring": ":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode", "name": "lowestCommonAncestor_BSTway", "signature": "def lowestCommonAncestor_BSTway(self, root, p, q)"}, {"docstring": ":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode", "name": "lowestCommonAncestor_pathway", "signature": "def lowestCommonAncestor_pathway(self, root, p, q)"}], "n_methods": 3, "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lowestCommonAncestor_recursively(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\n- def lowestCommonAncestor_BSTway(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\n- def lowestCommonAncestor_pathway(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def lowestCommonAncestor_recursively(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\n- def lowestCommonAncestor_BSTway(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\n- def lowestCommonAncestor_pathway(self, root, p, q): :type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\n\n<|skeleton|>\nclass Solution:\n\n def lowestCommonAncestor_recursively(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def lowestCommonAncestor_BSTway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n def lowestCommonAncestor_pathway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if root in (None, p, q):\n return root\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n return root if left and right else left or right\n<|end_body_0|>\n\n<|body_start_1|>\n ans = root\n while ans:\n if min(p.val, q.val) > ans.val and ans.right:\n ans = ans.right\n continue\n elif max(p.val, q.val) < ans.val and ans.left:\n ans = ans.left\n continue\n return ans\n<|end_body_1|>\n\n<|body_start_2|>\n def path(root, p):\n s = []\n now = root\n while now and now.val != p.val:\n s.append(now)\n now = now.left if now.val > p.val else now.right\n s.append(now)\n return s\n pathP, pathQ = (path(root, p), path(root, q))\n ans, i = (None, 0)\n minlen = min(len(pathP), len(pathQ))\n while i < minlen and pathP[i] == pathQ[i]:\n ans = pathP[i]\n i += 1\n return ans\n<|end_body_2|>\n", "revision_id": "38eb0556f865fd06f517ca45253d00aaca39d70b", "skeleton": "<|skeleton|>\nclass Solution:\n\n def lowestCommonAncestor_recursively(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_0|>\n\n def lowestCommonAncestor_BSTway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_1|>\n\n def lowestCommonAncestor_pathway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def lowestCommonAncestor_recursively(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n if root in (None, p, q):\n return root\n left = self.lowestCommonAncestor(root.left, p, q)\n right = self.lowestCommonAncestor(root.right, p, q)\n return root if left and right else left or right\n\n def lowestCommonAncestor_BSTway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n ans = root\n while ans:\n if min(p.val, q.val) > ans.val and ans.right:\n ans = ans.right\n continue\n elif max(p.val, q.val) < ans.val and ans.left:\n ans = ans.left\n continue\n return ans\n\n def lowestCommonAncestor_pathway(self, root, p, q):\n \"\"\":type root: TreeNode :type p: TreeNode :type q: TreeNode :rtype: TreeNode\"\"\"\n def path(root, p):\n s = []\n now = root\n while now and now.val != p.val:\n s.append(now)\n now = now.left if now.val > p.val else now.right\n s.append(now)\n return s\n pathP, pathQ = (path(root, p), path(root, q))\n ans, i = (None, 0)\n minlen = min(len(pathP), len(pathQ))\n while i < minlen and pathP[i] == pathQ[i]:\n ans = pathP[i]\n i += 1\n return ans\n", "source": "the_stack_v2_python_sparse", "source_path": "Python3/no235_LCA_of_a_BST.py", "source_repo": "yif042/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "9007721e19917484f8150fdae2ac03a9cae6355b", "bodies": ["self.enable_system_backup = enable_system_backup\nself.file_backup_params = file_backup_params\nself.snapshot_params = snapshot_params\nself.source_app_params = source_app_params\nself.volume_guid_vec = volume_guid_vec", "if dictionary is None:\n return None\nenable_system_backup = dictionary.get('enableSystemBackup')\nfile_backup_params = cohesity_management_sdk.models.physical_file_backup_params.PhysicalFileBackupParams.from_dictionary(dictionary.get('fileBackupParams')) if dictionary.get('fileBackupParams') else None\nsnapshot_params = cohesity_management_sdk.models.physical_snapshot_params.PhysicalSnapshotParams.from_dictionary(dictionary.get('snapshotParams')) if dictionary.get('snapshotParams') else None\nsource_app_params = cohesity_management_sdk.models.source_app_params.SourceAppParams.from_dictionary(dictionary.get('sourceAppParams')) if dictionary.get('sourceAppParams') else None\nvolume_guid_vec = dictionary.get('volumeGuidVec')\nreturn cls(enable_system_backup, file_backup_params, snapshot_params, source_app_params, volume_guid_vec)"], "bodies_text": "<|body_start_0|>\n self.enable_system_backup = enable_system_backup\n self.file_backup_params = file_backup_params\n self.snapshot_params = snapshot_params\n self.source_app_params = source_app_params\n self.volume_guid_vec = volume_guid_vec\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n enable_system_backup = dictionary.get('enableSystemBackup')\n file_backup_params = cohesity_management_sdk.models.physical_file_backup_params.PhysicalFileBackupParams.from_dictionary(dictionary.get('fileBackupParams')) if dictionary.get('fileBackupParams') else None\n snapshot_params = cohesity_management_sdk.models.physical_snapshot_params.PhysicalSnapshotParams.from_dictionary(dictionary.get('snapshotParams')) if dictionary.get('snapshotParams') else None\n source_app_params = cohesity_management_sdk.models.source_app_params.SourceAppParams.from_dictionary(dictionary.get('sourceAppParams')) if dictionary.get('sourceAppParams') else None\n volume_guid_vec = dictionary.get('volumeGuidVec')\n return cls(enable_system_backup, file_backup_params, snapshot_params, source_app_params, volume_guid_vec)\n<|end_body_1|>\n", "class_docstring": "Implementation of the 'PhysicalBackupSourceParams' model. Message to capture additional backup params for a Physical type source. Attributes: enable_system_backup (bool): Allows Magneto to drive a \"system\" backup using a 3rd-party tool installed on the Agent host. file_backup_params (PhysicalFileBackupParams): If the backup job type is kPhysicalFiles, this field should be set to specify the files to be backed up from the volumes on this source. snapshot_params (PhysicalSnapshotParams): This captures params to use for taking the snapshot. source_app_params (SourceAppParams): This message will capture params for applications that are running as part of the server. volume_guid_vec (list of stri", "class_name": "PhysicalBackupSourceParams", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass PhysicalBackupSourceParams:\n \"\"\"Implementation of the 'PhysicalBackupSourceParams' model. Message to capture additional backup params for a Physical type source. Attributes: enable_system_backup (bool): Allows Magneto to drive a \"system\" backup using a 3rd-party tool installed on the Agent host. file_backup_params (PhysicalFileBackupParams): If the backup job type is kPhysicalFiles, this field should be set to specify the files to be backed up from the volumes on this source. snapshot_params (PhysicalSnapshotParams): This captures params to use for taking the snapshot. source_app_params (SourceAppParams): This message will capture params for applications that are running as part of the server. volume_guid_vec (list of stri\"\"\"\n\n def __init__(self, enable_system_backup=None, file_backup_params=None, snapshot_params=None, source_app_params=None, volume_guid_vec=None):\n \"\"\"Constructor for the PhysicalBackupSourceParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.enable_system_backup = enable_system_backup\n self.file_backup_params = file_backup_params\n self.snapshot_params = snapshot_params\n self.source_app_params = source_app_params\n self.volume_guid_vec = volume_guid_vec\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n enable_system_backup = dictionary.get('enableSystemBackup')\n file_backup_params = cohesity_management_sdk.models.physical_file_backup_params.PhysicalFileBackupParams.from_dictionary(dictionary.get('fileBackupParams')) if dictionary.get('fileBackupParams') else None\n snapshot_params = cohesity_management_sdk.models.physical_snapshot_params.PhysicalSnapshotParams.from_dictionary(dictionary.get('snapshotParams')) if dictionary.get('snapshotParams') else None\n source_app_params = cohesity_management_sdk.models.source_app_params.SourceAppParams.from_dictionary(dictionary.get('sourceAppParams')) if dictionary.get('sourceAppParams') else None\n volume_guid_vec = dictionary.get('volumeGuidVec')\n return cls(enable_system_backup, file_backup_params, snapshot_params, source_app_params, volume_guid_vec)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000385", "length_bytes": 3743, "license_type": "permissive", "methods": [{"docstring": "Constructor for the PhysicalBackupSourceParams class", "name": "__init__", "signature": "def __init__(self, enable_system_backup=None, file_backup_params=None, snapshot_params=None, source_app_params=None, volume_guid_vec=None)"}, {"docstring": "Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "name": "from_dictionary", "signature": "def from_dictionary(cls, dictionary)"}], "n_methods": 2, "prompt": "Implement the Python class `PhysicalBackupSourceParams` described below.\n\nClass description:\nImplementation of the 'PhysicalBackupSourceParams' model. Message to capture additional backup params for a Physical type source. Attributes: enable_system_backup (bool): Allows Magneto to drive a \"system\" backup using a 3rd-party tool installed on the Agent host. file_backup_params (PhysicalFileBackupParams): If the backup job type is kPhysicalFiles, this field should be set to specify the files to be backed up from the volumes on this source. snapshot_params (PhysicalSnapshotParams): This captures params to use for taking the snapshot. source_app_params (SourceAppParams): This message will capture params for applications that are running as part of the server. volume_guid_vec (list of stri\n\nMethod signatures and docstrings:\n- def __init__(self, enable_system_backup=None, file_backup_params=None, snapshot_params=None, source_app_params=None, volume_guid_vec=None): Constructor for the PhysicalBackupSourceParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.", "prompted_full_text": "Implement the Python class `PhysicalBackupSourceParams` described below.\n\nClass description:\nImplementation of the 'PhysicalBackupSourceParams' model. Message to capture additional backup params for a Physical type source. Attributes: enable_system_backup (bool): Allows Magneto to drive a \"system\" backup using a 3rd-party tool installed on the Agent host. file_backup_params (PhysicalFileBackupParams): If the backup job type is kPhysicalFiles, this field should be set to specify the files to be backed up from the volumes on this source. snapshot_params (PhysicalSnapshotParams): This captures params to use for taking the snapshot. source_app_params (SourceAppParams): This message will capture params for applications that are running as part of the server. volume_guid_vec (list of stri\n\nMethod signatures and docstrings:\n- def __init__(self, enable_system_backup=None, file_backup_params=None, snapshot_params=None, source_app_params=None, volume_guid_vec=None): Constructor for the PhysicalBackupSourceParams class\n- def from_dictionary(cls, dictionary): Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\n\n<|skeleton|>\nclass PhysicalBackupSourceParams:\n \"\"\"Implementation of the 'PhysicalBackupSourceParams' model. Message to capture additional backup params for a Physical type source. Attributes: enable_system_backup (bool): Allows Magneto to drive a \"system\" backup using a 3rd-party tool installed on the Agent host. file_backup_params (PhysicalFileBackupParams): If the backup job type is kPhysicalFiles, this field should be set to specify the files to be backed up from the volumes on this source. snapshot_params (PhysicalSnapshotParams): This captures params to use for taking the snapshot. source_app_params (SourceAppParams): This message will capture params for applications that are running as part of the server. volume_guid_vec (list of stri\"\"\"\n\n def __init__(self, enable_system_backup=None, file_backup_params=None, snapshot_params=None, source_app_params=None, volume_guid_vec=None):\n \"\"\"Constructor for the PhysicalBackupSourceParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.enable_system_backup = enable_system_backup\n self.file_backup_params = file_backup_params\n self.snapshot_params = snapshot_params\n self.source_app_params = source_app_params\n self.volume_guid_vec = volume_guid_vec\n<|end_body_0|>\n\n<|body_start_1|>\n if dictionary is None:\n return None\n enable_system_backup = dictionary.get('enableSystemBackup')\n file_backup_params = cohesity_management_sdk.models.physical_file_backup_params.PhysicalFileBackupParams.from_dictionary(dictionary.get('fileBackupParams')) if dictionary.get('fileBackupParams') else None\n snapshot_params = cohesity_management_sdk.models.physical_snapshot_params.PhysicalSnapshotParams.from_dictionary(dictionary.get('snapshotParams')) if dictionary.get('snapshotParams') else None\n source_app_params = cohesity_management_sdk.models.source_app_params.SourceAppParams.from_dictionary(dictionary.get('sourceAppParams')) if dictionary.get('sourceAppParams') else None\n volume_guid_vec = dictionary.get('volumeGuidVec')\n return cls(enable_system_backup, file_backup_params, snapshot_params, source_app_params, volume_guid_vec)\n<|end_body_1|>\n", "revision_id": "e4973dfeb836266904d0369ea845513c7acf261e", "skeleton": "<|skeleton|>\nclass PhysicalBackupSourceParams:\n \"\"\"Implementation of the 'PhysicalBackupSourceParams' model. Message to capture additional backup params for a Physical type source. Attributes: enable_system_backup (bool): Allows Magneto to drive a \"system\" backup using a 3rd-party tool installed on the Agent host. file_backup_params (PhysicalFileBackupParams): If the backup job type is kPhysicalFiles, this field should be set to specify the files to be backed up from the volumes on this source. snapshot_params (PhysicalSnapshotParams): This captures params to use for taking the snapshot. source_app_params (SourceAppParams): This message will capture params for applications that are running as part of the server. volume_guid_vec (list of stri\"\"\"\n\n def __init__(self, enable_system_backup=None, file_backup_params=None, snapshot_params=None, source_app_params=None, volume_guid_vec=None):\n \"\"\"Constructor for the PhysicalBackupSourceParams class\"\"\"\n <|body_0|>\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class PhysicalBackupSourceParams:\n \"\"\"Implementation of the 'PhysicalBackupSourceParams' model. Message to capture additional backup params for a Physical type source. Attributes: enable_system_backup (bool): Allows Magneto to drive a \"system\" backup using a 3rd-party tool installed on the Agent host. file_backup_params (PhysicalFileBackupParams): If the backup job type is kPhysicalFiles, this field should be set to specify the files to be backed up from the volumes on this source. snapshot_params (PhysicalSnapshotParams): This captures params to use for taking the snapshot. source_app_params (SourceAppParams): This message will capture params for applications that are running as part of the server. volume_guid_vec (list of stri\"\"\"\n\n def __init__(self, enable_system_backup=None, file_backup_params=None, snapshot_params=None, source_app_params=None, volume_guid_vec=None):\n \"\"\"Constructor for the PhysicalBackupSourceParams class\"\"\"\n self.enable_system_backup = enable_system_backup\n self.file_backup_params = file_backup_params\n self.snapshot_params = snapshot_params\n self.source_app_params = source_app_params\n self.volume_guid_vec = volume_guid_vec\n\n def from_dictionary(cls, dictionary):\n \"\"\"Creates an instance of this model from a dictionary Args: dictionary (dictionary): A dictionary representation of the object as obtained from the deserialization of the server's response. The keys MUST match property names in the API description. Returns: object: An instance of this structure class.\"\"\"\n if dictionary is None:\n return None\n enable_system_backup = dictionary.get('enableSystemBackup')\n file_backup_params = cohesity_management_sdk.models.physical_file_backup_params.PhysicalFileBackupParams.from_dictionary(dictionary.get('fileBackupParams')) if dictionary.get('fileBackupParams') else None\n snapshot_params = cohesity_management_sdk.models.physical_snapshot_params.PhysicalSnapshotParams.from_dictionary(dictionary.get('snapshotParams')) if dictionary.get('snapshotParams') else None\n source_app_params = cohesity_management_sdk.models.source_app_params.SourceAppParams.from_dictionary(dictionary.get('sourceAppParams')) if dictionary.get('sourceAppParams') else None\n volume_guid_vec = dictionary.get('volumeGuidVec')\n return cls(enable_system_backup, file_backup_params, snapshot_params, source_app_params, volume_guid_vec)\n", "source": "the_stack_v2_python_sparse", "source_path": "cohesity_management_sdk/models/physical_backup_source_params.py", "source_repo": "cohesity/management-sdk-python", "split": "test", "star_events_count": 24} {"blob_id": "096b99a19923ac32eb08a9ce3ab32b8c964c62f4", "bodies": ["self.path = os.path.normpath(path)\nself.name = string.replace(os.path.basename(os.path.normpath(path)), '.cfg', '')\nconfigdir = os.path.dirname(self.path)\nif not os.path.exists(configdir):\n os.makedirs(configdir)\nif not os.path.exists(self.path):\n f = open(self.path, 'w')\n f.close()\nself.update()", "self.parser = SafeConfigParser()\nconfighandle = open(self.path, 'r')\nself.parser.readfp(confighandle, self.path)\nconfighandle.close()"], "bodies_text": "<|body_start_0|>\n self.path = os.path.normpath(path)\n self.name = string.replace(os.path.basename(os.path.normpath(path)), '.cfg', '')\n configdir = os.path.dirname(self.path)\n if not os.path.exists(configdir):\n os.makedirs(configdir)\n if not os.path.exists(self.path):\n f = open(self.path, 'w')\n f.close()\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.parser = SafeConfigParser()\n confighandle = open(self.path, 'r')\n self.parser.readfp(confighandle, self.path)\n confighandle.close()\n<|end_body_1|>\n", "class_docstring": "", "class_name": "ConfigWrapper", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConfigWrapper:\n\n def __init__(self, path):\n \"\"\"Creates a object which parsed the file 'path' and created Config.parser\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Reparses the file to have the latest information. Will replace current parser so save changes beforehand\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.path = os.path.normpath(path)\n self.name = string.replace(os.path.basename(os.path.normpath(path)), '.cfg', '')\n configdir = os.path.dirname(self.path)\n if not os.path.exists(configdir):\n os.makedirs(configdir)\n if not os.path.exists(self.path):\n f = open(self.path, 'w')\n f.close()\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.parser = SafeConfigParser()\n confighandle = open(self.path, 'r')\n self.parser.readfp(confighandle, self.path)\n confighandle.close()\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000386", "length_bytes": 945, "license_type": "no_license", "methods": [{"docstring": "Creates a object which parsed the file 'path' and created Config.parser", "name": "__init__", "signature": "def __init__(self, path)"}, {"docstring": "Reparses the file to have the latest information. Will replace current parser so save changes beforehand", "name": "update", "signature": "def update(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_050019", "prompt": "Implement the Python class `ConfigWrapper` described below.\n\nClass description:\nImplement the ConfigWrapper class.\n\nMethod signatures and docstrings:\n- def __init__(self, path): Creates a object which parsed the file 'path' and created Config.parser\n- def update(self): Reparses the file to have the latest information. Will replace current parser so save changes beforehand", "prompted_full_text": "Implement the Python class `ConfigWrapper` described below.\n\nClass description:\nImplement the ConfigWrapper class.\n\nMethod signatures and docstrings:\n- def __init__(self, path): Creates a object which parsed the file 'path' and created Config.parser\n- def update(self): Reparses the file to have the latest information. Will replace current parser so save changes beforehand\n\n<|skeleton|>\nclass ConfigWrapper:\n\n def __init__(self, path):\n \"\"\"Creates a object which parsed the file 'path' and created Config.parser\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Reparses the file to have the latest information. Will replace current parser so save changes beforehand\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.path = os.path.normpath(path)\n self.name = string.replace(os.path.basename(os.path.normpath(path)), '.cfg', '')\n configdir = os.path.dirname(self.path)\n if not os.path.exists(configdir):\n os.makedirs(configdir)\n if not os.path.exists(self.path):\n f = open(self.path, 'w')\n f.close()\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.parser = SafeConfigParser()\n confighandle = open(self.path, 'r')\n self.parser.readfp(confighandle, self.path)\n confighandle.close()\n<|end_body_1|>\n", "revision_id": "0cf22df3b59809e6fe24435f0faa9f091635c392", "skeleton": "<|skeleton|>\nclass ConfigWrapper:\n\n def __init__(self, path):\n \"\"\"Creates a object which parsed the file 'path' and created Config.parser\"\"\"\n <|body_0|>\n\n def update(self):\n \"\"\"Reparses the file to have the latest information. Will replace current parser so save changes beforehand\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ConfigWrapper:\n def __init__(self, path):\n \"\"\"Creates a object which parsed the file 'path' and created Config.parser\"\"\"\n self.path = os.path.normpath(path)\n self.name = string.replace(os.path.basename(os.path.normpath(path)), '.cfg', '')\n configdir = os.path.dirname(self.path)\n if not os.path.exists(configdir):\n os.makedirs(configdir)\n if not os.path.exists(self.path):\n f = open(self.path, 'w')\n f.close()\n self.update()\n\n def update(self):\n \"\"\"Reparses the file to have the latest information. Will replace current parser so save changes beforehand\"\"\"\n self.parser = SafeConfigParser()\n confighandle = open(self.path, 'r')\n self.parser.readfp(confighandle, self.path)\n confighandle.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "ManageStudy/Config/ConfigWrapper.py", "source_repo": "steffejr/CogRes_import", "split": "test", "star_events_count": 0} {"blob_id": "aeb0a583c34a1a7501b2837907cb24b43602a7a9", "bodies": ["try:\n params = request._serialize()\n headers = request.headers\n body = self.call('RecommendContent', params, headers=headers)\n response = json.loads(body)\n model = models.RecommendContentResponse()\n model._deserialize(response['Response'])\n return model\nexcept Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportAction', params, headers=headers)\n response = json.loads(body)\n model = models.ReportActionResponse()\n model._deserialize(response['Response'])\n return model\nexcept Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportMaterial', params, headers=headers)\n response = json.loads(body)\n model = models.ReportMaterialResponse()\n model._deserialize(response['Response'])\n return model\nexcept Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportPortrait', params, headers=headers)\n response = json.loads(body)\n model = models.ReportPortraitResponse()\n model._deserialize(response['Response'])\n return model\nexcept Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))"], "bodies_text": "<|body_start_0|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('RecommendContent', params, headers=headers)\n response = json.loads(body)\n model = models.RecommendContentResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportAction', params, headers=headers)\n response = json.loads(body)\n model = models.ReportActionResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportMaterial', params, headers=headers)\n response = json.loads(body)\n model = models.ReportMaterialResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportPortrait', params, headers=headers)\n response = json.loads(body)\n model = models.ReportPortraitResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_3|>\n", "class_docstring": "", "class_name": "IrpClient", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IrpClient:\n\n def RecommendContent(self, request):\n \"\"\"获取推荐结果 :param request: Request instance for RecommendContent. :type request: :class:`tencentcloud.irp.v20220324.models.RecommendContentRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.RecommendContentResponse`\"\"\"\n <|body_0|>\n\n def ReportAction(self, request):\n \"\"\"上报行为 :param request: Request instance for ReportAction. :type request: :class:`tencentcloud.irp.v20220324.models.ReportActionRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportActionResponse`\"\"\"\n <|body_1|>\n\n def ReportMaterial(self, request):\n \"\"\"上报物料 :param request: Request instance for ReportMaterial. :type request: :class:`tencentcloud.irp.v20220324.models.ReportMaterialRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportMaterialResponse`\"\"\"\n <|body_2|>\n\n def ReportPortrait(self, request):\n \"\"\"上报用户画像 :param request: Request instance for ReportPortrait. :type request: :class:`tencentcloud.irp.v20220324.models.ReportPortraitRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportPortraitResponse`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('RecommendContent', params, headers=headers)\n response = json.loads(body)\n model = models.RecommendContentResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportAction', params, headers=headers)\n response = json.loads(body)\n model = models.ReportActionResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportMaterial', params, headers=headers)\n response = json.loads(body)\n model = models.ReportMaterialResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportPortrait', params, headers=headers)\n response = json.loads(body)\n model = models.ReportPortraitResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000387", "length_bytes": 4389, "license_type": "permissive", "methods": [{"docstring": "获取推荐结果 :param request: Request instance for RecommendContent. :type request: :class:`tencentcloud.irp.v20220324.models.RecommendContentRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.RecommendContentResponse`", "name": "RecommendContent", "signature": "def RecommendContent(self, request)"}, {"docstring": "上报行为 :param request: Request instance for ReportAction. :type request: :class:`tencentcloud.irp.v20220324.models.ReportActionRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportActionResponse`", "name": "ReportAction", "signature": "def ReportAction(self, request)"}, {"docstring": "上报物料 :param request: Request instance for ReportMaterial. :type request: :class:`tencentcloud.irp.v20220324.models.ReportMaterialRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportMaterialResponse`", "name": "ReportMaterial", "signature": "def ReportMaterial(self, request)"}, {"docstring": "上报用户画像 :param request: Request instance for ReportPortrait. :type request: :class:`tencentcloud.irp.v20220324.models.ReportPortraitRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportPortraitResponse`", "name": "ReportPortrait", "signature": "def ReportPortrait(self, request)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_050432", "prompt": "Implement the Python class `IrpClient` described below.\n\nClass description:\nImplement the IrpClient class.\n\nMethod signatures and docstrings:\n- def RecommendContent(self, request): 获取推荐结果 :param request: Request instance for RecommendContent. :type request: :class:`tencentcloud.irp.v20220324.models.RecommendContentRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.RecommendContentResponse`\n- def ReportAction(self, request): 上报行为 :param request: Request instance for ReportAction. :type request: :class:`tencentcloud.irp.v20220324.models.ReportActionRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportActionResponse`\n- def ReportMaterial(self, request): 上报物料 :param request: Request instance for ReportMaterial. :type request: :class:`tencentcloud.irp.v20220324.models.ReportMaterialRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportMaterialResponse`\n- def ReportPortrait(self, request): 上报用户画像 :param request: Request instance for ReportPortrait. :type request: :class:`tencentcloud.irp.v20220324.models.ReportPortraitRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportPortraitResponse`", "prompted_full_text": "Implement the Python class `IrpClient` described below.\n\nClass description:\nImplement the IrpClient class.\n\nMethod signatures and docstrings:\n- def RecommendContent(self, request): 获取推荐结果 :param request: Request instance for RecommendContent. :type request: :class:`tencentcloud.irp.v20220324.models.RecommendContentRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.RecommendContentResponse`\n- def ReportAction(self, request): 上报行为 :param request: Request instance for ReportAction. :type request: :class:`tencentcloud.irp.v20220324.models.ReportActionRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportActionResponse`\n- def ReportMaterial(self, request): 上报物料 :param request: Request instance for ReportMaterial. :type request: :class:`tencentcloud.irp.v20220324.models.ReportMaterialRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportMaterialResponse`\n- def ReportPortrait(self, request): 上报用户画像 :param request: Request instance for ReportPortrait. :type request: :class:`tencentcloud.irp.v20220324.models.ReportPortraitRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportPortraitResponse`\n\n<|skeleton|>\nclass IrpClient:\n\n def RecommendContent(self, request):\n \"\"\"获取推荐结果 :param request: Request instance for RecommendContent. :type request: :class:`tencentcloud.irp.v20220324.models.RecommendContentRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.RecommendContentResponse`\"\"\"\n <|body_0|>\n\n def ReportAction(self, request):\n \"\"\"上报行为 :param request: Request instance for ReportAction. :type request: :class:`tencentcloud.irp.v20220324.models.ReportActionRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportActionResponse`\"\"\"\n <|body_1|>\n\n def ReportMaterial(self, request):\n \"\"\"上报物料 :param request: Request instance for ReportMaterial. :type request: :class:`tencentcloud.irp.v20220324.models.ReportMaterialRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportMaterialResponse`\"\"\"\n <|body_2|>\n\n def ReportPortrait(self, request):\n \"\"\"上报用户画像 :param request: Request instance for ReportPortrait. :type request: :class:`tencentcloud.irp.v20220324.models.ReportPortraitRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportPortraitResponse`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('RecommendContent', params, headers=headers)\n response = json.loads(body)\n model = models.RecommendContentResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportAction', params, headers=headers)\n response = json.loads(body)\n model = models.ReportActionResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_1|>\n\n<|body_start_2|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportMaterial', params, headers=headers)\n response = json.loads(body)\n model = models.ReportMaterialResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_2|>\n\n<|body_start_3|>\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportPortrait', params, headers=headers)\n response = json.loads(body)\n model = models.ReportPortraitResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n<|end_body_3|>\n", "revision_id": "6baf00a5a56ba58b6a1123423e0a1422d17a0201", "skeleton": "<|skeleton|>\nclass IrpClient:\n\n def RecommendContent(self, request):\n \"\"\"获取推荐结果 :param request: Request instance for RecommendContent. :type request: :class:`tencentcloud.irp.v20220324.models.RecommendContentRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.RecommendContentResponse`\"\"\"\n <|body_0|>\n\n def ReportAction(self, request):\n \"\"\"上报行为 :param request: Request instance for ReportAction. :type request: :class:`tencentcloud.irp.v20220324.models.ReportActionRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportActionResponse`\"\"\"\n <|body_1|>\n\n def ReportMaterial(self, request):\n \"\"\"上报物料 :param request: Request instance for ReportMaterial. :type request: :class:`tencentcloud.irp.v20220324.models.ReportMaterialRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportMaterialResponse`\"\"\"\n <|body_2|>\n\n def ReportPortrait(self, request):\n \"\"\"上报用户画像 :param request: Request instance for ReportPortrait. :type request: :class:`tencentcloud.irp.v20220324.models.ReportPortraitRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportPortraitResponse`\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IrpClient:\n def RecommendContent(self, request):\n \"\"\"获取推荐结果 :param request: Request instance for RecommendContent. :type request: :class:`tencentcloud.irp.v20220324.models.RecommendContentRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.RecommendContentResponse`\"\"\"\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('RecommendContent', params, headers=headers)\n response = json.loads(body)\n model = models.RecommendContentResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n\n def ReportAction(self, request):\n \"\"\"上报行为 :param request: Request instance for ReportAction. :type request: :class:`tencentcloud.irp.v20220324.models.ReportActionRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportActionResponse`\"\"\"\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportAction', params, headers=headers)\n response = json.loads(body)\n model = models.ReportActionResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n\n def ReportMaterial(self, request):\n \"\"\"上报物料 :param request: Request instance for ReportMaterial. :type request: :class:`tencentcloud.irp.v20220324.models.ReportMaterialRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportMaterialResponse`\"\"\"\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportMaterial', params, headers=headers)\n response = json.loads(body)\n model = models.ReportMaterialResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n\n def ReportPortrait(self, request):\n \"\"\"上报用户画像 :param request: Request instance for ReportPortrait. :type request: :class:`tencentcloud.irp.v20220324.models.ReportPortraitRequest` :rtype: :class:`tencentcloud.irp.v20220324.models.ReportPortraitResponse`\"\"\"\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call('ReportPortrait', params, headers=headers)\n response = json.loads(body)\n model = models.ReportPortraitResponse()\n model._deserialize(response['Response'])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))\n", "source": "the_stack_v2_python_sparse", "source_path": "tencentcloud/irp/v20220324/irp_client.py", "source_repo": "TencentCloud/tencentcloud-sdk-python", "split": "test", "star_events_count": 594} {"blob_id": "e929af2a02fe83be318f1f57b1a34fd68de7ddc3", "bodies": ["self.width = width\nself.height = height\nself.pace = 1\nself.size = 10\nself.red = 0\nself.green = 0\nself.blue = 0\nself.x = int(random.random() * self.width)\nself.y = int(random.random() * self.height)\ndirection = [1, -1]\nrandom.shuffle(direction)\nself.dx = direction[0]\nrandom.shuffle(direction)\nself.dy = direction[0]", "if not width is None:\n self.setWidth(width)\nif not height is None:\n self.setHeight(height)\nhalf = int(self.size * 1.6 / 2)\nself.x = self.x + self.pace * self.dx\nself.y = self.y + self.pace * self.dy\nif self.y <= 0 + half:\n self.dy *= -1\nelif self.y >= self.height - half:\n self.dy *= -1\nif self.x <= 0 + half:\n self.dx *= -1\nelif self.x >= self.height - half:\n self.dx *= -1\nreturn (self.x, self.y)"], "bodies_text": "<|body_start_0|>\n self.width = width\n self.height = height\n self.pace = 1\n self.size = 10\n self.red = 0\n self.green = 0\n self.blue = 0\n self.x = int(random.random() * self.width)\n self.y = int(random.random() * self.height)\n direction = [1, -1]\n random.shuffle(direction)\n self.dx = direction[0]\n random.shuffle(direction)\n self.dy = direction[0]\n<|end_body_0|>\n\n<|body_start_1|>\n if not width is None:\n self.setWidth(width)\n if not height is None:\n self.setHeight(height)\n half = int(self.size * 1.6 / 2)\n self.x = self.x + self.pace * self.dx\n self.y = self.y + self.pace * self.dy\n if self.y <= 0 + half:\n self.dy *= -1\n elif self.y >= self.height - half:\n self.dy *= -1\n if self.x <= 0 + half:\n self.dx *= -1\n elif self.x >= self.height - half:\n self.dx *= -1\n return (self.x, self.y)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "GameBall", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GameBall:\n\n def __init__(self, pace=1, size=10, width=500, height=500):\n \"\"\"Constructor This method creates a ball with params that are pre-picked for now to keep it simple.\"\"\"\n <|body_0|>\n\n def move(self, width=None, height=None):\n \"\"\"Description: move - updates a balls location based on current position and current direction Params: width [int] : width of window height [int] : height of window\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.width = width\n self.height = height\n self.pace = 1\n self.size = 10\n self.red = 0\n self.green = 0\n self.blue = 0\n self.x = int(random.random() * self.width)\n self.y = int(random.random() * self.height)\n direction = [1, -1]\n random.shuffle(direction)\n self.dx = direction[0]\n random.shuffle(direction)\n self.dy = direction[0]\n<|end_body_0|>\n\n<|body_start_1|>\n if not width is None:\n self.setWidth(width)\n if not height is None:\n self.setHeight(height)\n half = int(self.size * 1.6 / 2)\n self.x = self.x + self.pace * self.dx\n self.y = self.y + self.pace * self.dy\n if self.y <= 0 + half:\n self.dy *= -1\n elif self.y >= self.height - half:\n self.dy *= -1\n if self.x <= 0 + half:\n self.dx *= -1\n elif self.x >= self.height - half:\n self.dx *= -1\n return (self.x, self.y)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000388", "length_bytes": 4256, "license_type": "no_license", "methods": [{"docstring": "Constructor This method creates a ball with params that are pre-picked for now to keep it simple.", "name": "__init__", "signature": "def __init__(self, pace=1, size=10, width=500, height=500)"}, {"docstring": "Description: move - updates a balls location based on current position and current direction Params: width [int] : width of window height [int] : height of window", "name": "move", "signature": "def move(self, width=None, height=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_008326", "prompt": "Implement the Python class `GameBall` described below.\n\nClass description:\nImplement the GameBall class.\n\nMethod signatures and docstrings:\n- def __init__(self, pace=1, size=10, width=500, height=500): Constructor This method creates a ball with params that are pre-picked for now to keep it simple.\n- def move(self, width=None, height=None): Description: move - updates a balls location based on current position and current direction Params: width [int] : width of window height [int] : height of window", "prompted_full_text": "Implement the Python class `GameBall` described below.\n\nClass description:\nImplement the GameBall class.\n\nMethod signatures and docstrings:\n- def __init__(self, pace=1, size=10, width=500, height=500): Constructor This method creates a ball with params that are pre-picked for now to keep it simple.\n- def move(self, width=None, height=None): Description: move - updates a balls location based on current position and current direction Params: width [int] : width of window height [int] : height of window\n\n<|skeleton|>\nclass GameBall:\n\n def __init__(self, pace=1, size=10, width=500, height=500):\n \"\"\"Constructor This method creates a ball with params that are pre-picked for now to keep it simple.\"\"\"\n <|body_0|>\n\n def move(self, width=None, height=None):\n \"\"\"Description: move - updates a balls location based on current position and current direction Params: width [int] : width of window height [int] : height of window\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.width = width\n self.height = height\n self.pace = 1\n self.size = 10\n self.red = 0\n self.green = 0\n self.blue = 0\n self.x = int(random.random() * self.width)\n self.y = int(random.random() * self.height)\n direction = [1, -1]\n random.shuffle(direction)\n self.dx = direction[0]\n random.shuffle(direction)\n self.dy = direction[0]\n<|end_body_0|>\n\n<|body_start_1|>\n if not width is None:\n self.setWidth(width)\n if not height is None:\n self.setHeight(height)\n half = int(self.size * 1.6 / 2)\n self.x = self.x + self.pace * self.dx\n self.y = self.y + self.pace * self.dy\n if self.y <= 0 + half:\n self.dy *= -1\n elif self.y >= self.height - half:\n self.dy *= -1\n if self.x <= 0 + half:\n self.dx *= -1\n elif self.x >= self.height - half:\n self.dx *= -1\n return (self.x, self.y)\n<|end_body_1|>\n", "revision_id": "ae1496234bb05b77933819c2054607f0513878f1", "skeleton": "<|skeleton|>\nclass GameBall:\n\n def __init__(self, pace=1, size=10, width=500, height=500):\n \"\"\"Constructor This method creates a ball with params that are pre-picked for now to keep it simple.\"\"\"\n <|body_0|>\n\n def move(self, width=None, height=None):\n \"\"\"Description: move - updates a balls location based on current position and current direction Params: width [int] : width of window height [int] : height of window\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GameBall:\n def __init__(self, pace=1, size=10, width=500, height=500):\n \"\"\"Constructor This method creates a ball with params that are pre-picked for now to keep it simple.\"\"\"\n self.width = width\n self.height = height\n self.pace = 1\n self.size = 10\n self.red = 0\n self.green = 0\n self.blue = 0\n self.x = int(random.random() * self.width)\n self.y = int(random.random() * self.height)\n direction = [1, -1]\n random.shuffle(direction)\n self.dx = direction[0]\n random.shuffle(direction)\n self.dy = direction[0]\n\n def move(self, width=None, height=None):\n \"\"\"Description: move - updates a balls location based on current position and current direction Params: width [int] : width of window height [int] : height of window\"\"\"\n if not width is None:\n self.setWidth(width)\n if not height is None:\n self.setHeight(height)\n half = int(self.size * 1.6 / 2)\n self.x = self.x + self.pace * self.dx\n self.y = self.y + self.pace * self.dy\n if self.y <= 0 + half:\n self.dy *= -1\n elif self.y >= self.height - half:\n self.dy *= -1\n if self.x <= 0 + half:\n self.dx *= -1\n elif self.x >= self.height - half:\n self.dx *= -1\n return (self.x, self.y)\n", "source": "the_stack_v2_python_sparse", "source_path": "Resources/10-PyGame/05_pygame_example.py", "source_repo": "AllysonKapers/2143-Object-Oriented-Programming", "split": "test", "star_events_count": 1} {"blob_id": "9c82e85aef149e48556d92afbf473b7c33b72592", "bodies": ["errors: dict[str, str] = {}\nhost: str = data[CONF_HOST]\nport: int = data[CONF_PORT]\nusername: str = data[CONF_USERNAME]\npassword: str = data[CONF_PASSWORD]\nverify_ssl: bool = data[CONF_VERIFY_SSL]\nuptime_robot_api = UptimeKuma(async_get_clientsession(self.hass), f'{host}:{port}', username, password, verify_ssl)\ntry:\n await uptime_robot_api.async_get_monitors()\nexcept UptimeKumaException as exception:\n LOGGER.error(exception)\n errors['base'] = 'cannot_connect'\nexcept Exception as exception:\n LOGGER.exception(exception)\n errors['base'] = 'unknown'\nreturn errors", "if user_input is None:\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA)\nerrors = await self._validate_input(user_input)\nif not errors:\n unique_id = f'{user_input[CONF_HOST]}:{user_input[CONF_PORT]}'\n await self.async_set_unique_id(unique_id)\n self._abort_if_unique_id_configured()\n return self.async_create_entry(title=unique_id, data=user_input)\nreturn self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA, errors=errors)"], "bodies_text": "<|body_start_0|>\n errors: dict[str, str] = {}\n host: str = data[CONF_HOST]\n port: int = data[CONF_PORT]\n username: str = data[CONF_USERNAME]\n password: str = data[CONF_PASSWORD]\n verify_ssl: bool = data[CONF_VERIFY_SSL]\n uptime_robot_api = UptimeKuma(async_get_clientsession(self.hass), f'{host}:{port}', username, password, verify_ssl)\n try:\n await uptime_robot_api.async_get_monitors()\n except UptimeKumaException as exception:\n LOGGER.error(exception)\n errors['base'] = 'cannot_connect'\n except Exception as exception:\n LOGGER.exception(exception)\n errors['base'] = 'unknown'\n return errors\n<|end_body_0|>\n\n<|body_start_1|>\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA)\n errors = await self._validate_input(user_input)\n if not errors:\n unique_id = f'{user_input[CONF_HOST]}:{user_input[CONF_PORT]}'\n await self.async_set_unique_id(unique_id)\n self._abort_if_unique_id_configured()\n return self.async_create_entry(title=unique_id, data=user_input)\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA, errors=errors)\n<|end_body_1|>\n", "class_docstring": "Handle a config flow for Uptime Kuma.", "class_name": "ConfigFlow", "detected_licenses": ["Unlicense"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConfigFlow:\n \"\"\"Handle a config flow for Uptime Kuma.\"\"\"\n\n async def _validate_input(self, data: dict[str, Any]) -> tuple[dict[str, str], None]:\n \"\"\"Validate the user input allows us to connect.\"\"\"\n <|body_0|>\n\n async def async_step_user(self, user_input: dict[str, Any] | None=None) -> FlowResult:\n \"\"\"Handle the initial step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n errors: dict[str, str] = {}\n host: str = data[CONF_HOST]\n port: int = data[CONF_PORT]\n username: str = data[CONF_USERNAME]\n password: str = data[CONF_PASSWORD]\n verify_ssl: bool = data[CONF_VERIFY_SSL]\n uptime_robot_api = UptimeKuma(async_get_clientsession(self.hass), f'{host}:{port}', username, password, verify_ssl)\n try:\n await uptime_robot_api.async_get_monitors()\n except UptimeKumaException as exception:\n LOGGER.error(exception)\n errors['base'] = 'cannot_connect'\n except Exception as exception:\n LOGGER.exception(exception)\n errors['base'] = 'unknown'\n return errors\n<|end_body_0|>\n\n<|body_start_1|>\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA)\n errors = await self._validate_input(user_input)\n if not errors:\n unique_id = f'{user_input[CONF_HOST]}:{user_input[CONF_PORT]}'\n await self.async_set_unique_id(unique_id)\n self._abort_if_unique_id_configured()\n return self.async_create_entry(title=unique_id, data=user_input)\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA, errors=errors)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000389", "length_bytes": 2760, "license_type": "permissive", "methods": [{"docstring": "Validate the user input allows us to connect.", "name": "_validate_input", "signature": "async def _validate_input(self, data: dict[str, Any]) -> tuple[dict[str, str], None]"}, {"docstring": "Handle the initial step.", "name": "async_step_user", "signature": "async def async_step_user(self, user_input: dict[str, Any] | None=None) -> FlowResult"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000569", "prompt": "Implement the Python class `ConfigFlow` described below.\n\nClass description:\nHandle a config flow for Uptime Kuma.\n\nMethod signatures and docstrings:\n- async def _validate_input(self, data: dict[str, Any]) -> tuple[dict[str, str], None]: Validate the user input allows us to connect.\n- async def async_step_user(self, user_input: dict[str, Any] | None=None) -> FlowResult: Handle the initial step.", "prompted_full_text": "Implement the Python class `ConfigFlow` described below.\n\nClass description:\nHandle a config flow for Uptime Kuma.\n\nMethod signatures and docstrings:\n- async def _validate_input(self, data: dict[str, Any]) -> tuple[dict[str, str], None]: Validate the user input allows us to connect.\n- async def async_step_user(self, user_input: dict[str, Any] | None=None) -> FlowResult: Handle the initial step.\n\n<|skeleton|>\nclass ConfigFlow:\n \"\"\"Handle a config flow for Uptime Kuma.\"\"\"\n\n async def _validate_input(self, data: dict[str, Any]) -> tuple[dict[str, str], None]:\n \"\"\"Validate the user input allows us to connect.\"\"\"\n <|body_0|>\n\n async def async_step_user(self, user_input: dict[str, Any] | None=None) -> FlowResult:\n \"\"\"Handle the initial step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n errors: dict[str, str] = {}\n host: str = data[CONF_HOST]\n port: int = data[CONF_PORT]\n username: str = data[CONF_USERNAME]\n password: str = data[CONF_PASSWORD]\n verify_ssl: bool = data[CONF_VERIFY_SSL]\n uptime_robot_api = UptimeKuma(async_get_clientsession(self.hass), f'{host}:{port}', username, password, verify_ssl)\n try:\n await uptime_robot_api.async_get_monitors()\n except UptimeKumaException as exception:\n LOGGER.error(exception)\n errors['base'] = 'cannot_connect'\n except Exception as exception:\n LOGGER.exception(exception)\n errors['base'] = 'unknown'\n return errors\n<|end_body_0|>\n\n<|body_start_1|>\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA)\n errors = await self._validate_input(user_input)\n if not errors:\n unique_id = f'{user_input[CONF_HOST]}:{user_input[CONF_PORT]}'\n await self.async_set_unique_id(unique_id)\n self._abort_if_unique_id_configured()\n return self.async_create_entry(title=unique_id, data=user_input)\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA, errors=errors)\n<|end_body_1|>\n", "revision_id": "8548d9999ddd54f13d6a307e013abcb8c897a74e", "skeleton": "<|skeleton|>\nclass ConfigFlow:\n \"\"\"Handle a config flow for Uptime Kuma.\"\"\"\n\n async def _validate_input(self, data: dict[str, Any]) -> tuple[dict[str, str], None]:\n \"\"\"Validate the user input allows us to connect.\"\"\"\n <|body_0|>\n\n async def async_step_user(self, user_input: dict[str, Any] | None=None) -> FlowResult:\n \"\"\"Handle the initial step.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ConfigFlow:\n \"\"\"Handle a config flow for Uptime Kuma.\"\"\"\n\n async def _validate_input(self, data: dict[str, Any]) -> tuple[dict[str, str], None]:\n \"\"\"Validate the user input allows us to connect.\"\"\"\n errors: dict[str, str] = {}\n host: str = data[CONF_HOST]\n port: int = data[CONF_PORT]\n username: str = data[CONF_USERNAME]\n password: str = data[CONF_PASSWORD]\n verify_ssl: bool = data[CONF_VERIFY_SSL]\n uptime_robot_api = UptimeKuma(async_get_clientsession(self.hass), f'{host}:{port}', username, password, verify_ssl)\n try:\n await uptime_robot_api.async_get_monitors()\n except UptimeKumaException as exception:\n LOGGER.error(exception)\n errors['base'] = 'cannot_connect'\n except Exception as exception:\n LOGGER.exception(exception)\n errors['base'] = 'unknown'\n return errors\n\n async def async_step_user(self, user_input: dict[str, Any] | None=None) -> FlowResult:\n \"\"\"Handle the initial step.\"\"\"\n if user_input is None:\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA)\n errors = await self._validate_input(user_input)\n if not errors:\n unique_id = f'{user_input[CONF_HOST]}:{user_input[CONF_PORT]}'\n await self.async_set_unique_id(unique_id)\n self._abort_if_unique_id_configured()\n return self.async_create_entry(title=unique_id, data=user_input)\n return self.async_show_form(step_id='user', data_schema=STEP_USER_DATA_SCHEMA, errors=errors)\n", "source": "the_stack_v2_python_sparse", "source_path": "custom_components/uptime_kuma/config_flow.py", "source_repo": "bacco007/HomeAssistantConfig", "split": "test", "star_events_count": 98} {"blob_id": "80347d5591f3c1760d6be2e64e2ba82d5fbcbf76", "bodies": ["self.shape = (filtW, filtH, fmapsIn, fmapsOut)\nself.pool_sz = pool_sz\n'\\n NOTE: the * in *self.shape unpacks the tuple, since np.random.randn\\n takes dimensions as seperate integer arguments, not a tuple like zeros.\\n '\nW = np.random.randn(*self.shape) * np.sqrt(2.0 / np.prod(self.shape[:-1]))\nself.W = tf.Variable(W.astype(np.float32))\nb = np.zeros(fmapsOut, dtype=np.float32)\nself.b = tf.Variable(b)\nself.params = [self.W, self.b]", "conv_out = tf.nn.conv2d(X, self.W, [1, 1, 1, 1], 'SAME')\nconv_out = tf.nn.bias_add(conv_out, self.b)\np = self.pool_sz\npool_out = tf.nn.max_pool(conv_out, [1, p, p, 1], [1, p, p, 1], 'SAME')\nreturn pool_out"], "bodies_text": "<|body_start_0|>\n self.shape = (filtW, filtH, fmapsIn, fmapsOut)\n self.pool_sz = pool_sz\n '\\n NOTE: the * in *self.shape unpacks the tuple, since np.random.randn\\n takes dimensions as seperate integer arguments, not a tuple like zeros.\\n '\n W = np.random.randn(*self.shape) * np.sqrt(2.0 / np.prod(self.shape[:-1]))\n self.W = tf.Variable(W.astype(np.float32))\n b = np.zeros(fmapsOut, dtype=np.float32)\n self.b = tf.Variable(b)\n self.params = [self.W, self.b]\n<|end_body_0|>\n\n<|body_start_1|>\n conv_out = tf.nn.conv2d(X, self.W, [1, 1, 1, 1], 'SAME')\n conv_out = tf.nn.bias_add(conv_out, self.b)\n p = self.pool_sz\n pool_out = tf.nn.max_pool(conv_out, [1, p, p, 1], [1, p, p, 1], 'SAME')\n return pool_out\n<|end_body_1|>\n", "class_docstring": "Generate a new convolution and maxpool layer.", "class_name": "ConvPoolLayer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConvPoolLayer:\n \"\"\"Generate a new convolution and maxpool layer.\"\"\"\n\n def __init__(self, filtW, filtH, fmapsIn, fmapsOut, pool_sz=2):\n \"\"\"Initialize filters\"\"\"\n <|body_0|>\n\n def forward(self, X):\n \"\"\"Apply convolutions and max pooling\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.shape = (filtW, filtH, fmapsIn, fmapsOut)\n self.pool_sz = pool_sz\n '\\n NOTE: the * in *self.shape unpacks the tuple, since np.random.randn\\n takes dimensions as seperate integer arguments, not a tuple like zeros.\\n '\n W = np.random.randn(*self.shape) * np.sqrt(2.0 / np.prod(self.shape[:-1]))\n self.W = tf.Variable(W.astype(np.float32))\n b = np.zeros(fmapsOut, dtype=np.float32)\n self.b = tf.Variable(b)\n self.params = [self.W, self.b]\n<|end_body_0|>\n\n<|body_start_1|>\n conv_out = tf.nn.conv2d(X, self.W, [1, 1, 1, 1], 'SAME')\n conv_out = tf.nn.bias_add(conv_out, self.b)\n p = self.pool_sz\n pool_out = tf.nn.max_pool(conv_out, [1, p, p, 1], [1, p, p, 1], 'SAME')\n return pool_out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000390", "length_bytes": 11943, "license_type": "no_license", "methods": [{"docstring": "Initialize filters", "name": "__init__", "signature": "def __init__(self, filtW, filtH, fmapsIn, fmapsOut, pool_sz=2)"}, {"docstring": "Apply convolutions and max pooling", "name": "forward", "signature": "def forward(self, X)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020969", "prompt": "Implement the Python class `ConvPoolLayer` described below.\n\nClass description:\nGenerate a new convolution and maxpool layer.\n\nMethod signatures and docstrings:\n- def __init__(self, filtW, filtH, fmapsIn, fmapsOut, pool_sz=2): Initialize filters\n- def forward(self, X): Apply convolutions and max pooling", "prompted_full_text": "Implement the Python class `ConvPoolLayer` described below.\n\nClass description:\nGenerate a new convolution and maxpool layer.\n\nMethod signatures and docstrings:\n- def __init__(self, filtW, filtH, fmapsIn, fmapsOut, pool_sz=2): Initialize filters\n- def forward(self, X): Apply convolutions and max pooling\n\n<|skeleton|>\nclass ConvPoolLayer:\n \"\"\"Generate a new convolution and maxpool layer.\"\"\"\n\n def __init__(self, filtW, filtH, fmapsIn, fmapsOut, pool_sz=2):\n \"\"\"Initialize filters\"\"\"\n <|body_0|>\n\n def forward(self, X):\n \"\"\"Apply convolutions and max pooling\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.shape = (filtW, filtH, fmapsIn, fmapsOut)\n self.pool_sz = pool_sz\n '\\n NOTE: the * in *self.shape unpacks the tuple, since np.random.randn\\n takes dimensions as seperate integer arguments, not a tuple like zeros.\\n '\n W = np.random.randn(*self.shape) * np.sqrt(2.0 / np.prod(self.shape[:-1]))\n self.W = tf.Variable(W.astype(np.float32))\n b = np.zeros(fmapsOut, dtype=np.float32)\n self.b = tf.Variable(b)\n self.params = [self.W, self.b]\n<|end_body_0|>\n\n<|body_start_1|>\n conv_out = tf.nn.conv2d(X, self.W, [1, 1, 1, 1], 'SAME')\n conv_out = tf.nn.bias_add(conv_out, self.b)\n p = self.pool_sz\n pool_out = tf.nn.max_pool(conv_out, [1, p, p, 1], [1, p, p, 1], 'SAME')\n return pool_out\n<|end_body_1|>\n", "revision_id": "0aa9fceb9ac4f42c9fe5f257a40ca7193f8073a0", "skeleton": "<|skeleton|>\nclass ConvPoolLayer:\n \"\"\"Generate a new convolution and maxpool layer.\"\"\"\n\n def __init__(self, filtW, filtH, fmapsIn, fmapsOut, pool_sz=2):\n \"\"\"Initialize filters\"\"\"\n <|body_0|>\n\n def forward(self, X):\n \"\"\"Apply convolutions and max pooling\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ConvPoolLayer:\n \"\"\"Generate a new convolution and maxpool layer.\"\"\"\n\n def __init__(self, filtW, filtH, fmapsIn, fmapsOut, pool_sz=2):\n \"\"\"Initialize filters\"\"\"\n self.shape = (filtW, filtH, fmapsIn, fmapsOut)\n self.pool_sz = pool_sz\n '\\n NOTE: the * in *self.shape unpacks the tuple, since np.random.randn\\n takes dimensions as seperate integer arguments, not a tuple like zeros.\\n '\n W = np.random.randn(*self.shape) * np.sqrt(2.0 / np.prod(self.shape[:-1]))\n self.W = tf.Variable(W.astype(np.float32))\n b = np.zeros(fmapsOut, dtype=np.float32)\n self.b = tf.Variable(b)\n self.params = [self.W, self.b]\n\n def forward(self, X):\n \"\"\"Apply convolutions and max pooling\"\"\"\n conv_out = tf.nn.conv2d(X, self.W, [1, 1, 1, 1], 'SAME')\n conv_out = tf.nn.bias_add(conv_out, self.b)\n p = self.pool_sz\n pool_out = tf.nn.max_pool(conv_out, [1, p, p, 1], [1, p, p, 1], 'SAME')\n return pool_out\n", "source": "the_stack_v2_python_sparse", "source_path": "LP_convolution/facial_recognition/face_tf_conv.py", "source_repo": "geoffder/learning", "split": "test", "star_events_count": 1} {"blob_id": "259a4cb844eaf532764c6e12ee052b15b2b8642e", "bodies": ["current = self.GetValue()\ndirectory = os.path.split(current)\nif os.path.isdir(current):\n directory = current\n current = ''\nelif directory and os.path.isdir(directory[0]):\n current = directory[1]\n directory = directory[0]\nelse:\n directory = self.startDirectory\n current = ''\ndlg = wx.FileDialog(self, self.dialogTitle, directory, current, self.fileMask, self.fileMode)\nif dlg.ShowModal() == wx.ID_OK:\n self.SetValue(dlg.GetPaths())\ndlg.Destroy()", "save = self.callCallback\nself.callCallback = callBack\nself.textControl.SetValue(' '.join(['\"' + _ + '\"' for _ in value]))\nself.callCallback = save"], "bodies_text": "<|body_start_0|>\n current = self.GetValue()\n directory = os.path.split(current)\n if os.path.isdir(current):\n directory = current\n current = ''\n elif directory and os.path.isdir(directory[0]):\n current = directory[1]\n directory = directory[0]\n else:\n directory = self.startDirectory\n current = ''\n dlg = wx.FileDialog(self, self.dialogTitle, directory, current, self.fileMask, self.fileMode)\n if dlg.ShowModal() == wx.ID_OK:\n self.SetValue(dlg.GetPaths())\n dlg.Destroy()\n<|end_body_0|>\n\n<|body_start_1|>\n save = self.callCallback\n self.callCallback = callBack\n self.textControl.SetValue(' '.join(['\"' + _ + '\"' for _ in value]))\n self.callCallback = save\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MultipleFileBrowseButton", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MultipleFileBrowseButton:\n\n def OnBrowse(self, event=None):\n \"\"\"Going to browse for file...\"\"\"\n <|body_0|>\n\n def SetValue(self, value, callBack=1):\n \"\"\"set current value of text control\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n current = self.GetValue()\n directory = os.path.split(current)\n if os.path.isdir(current):\n directory = current\n current = ''\n elif directory and os.path.isdir(directory[0]):\n current = directory[1]\n directory = directory[0]\n else:\n directory = self.startDirectory\n current = ''\n dlg = wx.FileDialog(self, self.dialogTitle, directory, current, self.fileMask, self.fileMode)\n if dlg.ShowModal() == wx.ID_OK:\n self.SetValue(dlg.GetPaths())\n dlg.Destroy()\n<|end_body_0|>\n\n<|body_start_1|>\n save = self.callCallback\n self.callCallback = callBack\n self.textControl.SetValue(' '.join(['\"' + _ + '\"' for _ in value]))\n self.callCallback = save\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000391", "length_bytes": 13292, "license_type": "permissive", "methods": [{"docstring": "Going to browse for file...", "name": "OnBrowse", "signature": "def OnBrowse(self, event=None)"}, {"docstring": "set current value of text control", "name": "SetValue", "signature": "def SetValue(self, value, callBack=1)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020033", "prompt": "Implement the Python class `MultipleFileBrowseButton` described below.\n\nClass description:\nImplement the MultipleFileBrowseButton class.\n\nMethod signatures and docstrings:\n- def OnBrowse(self, event=None): Going to browse for file...\n- def SetValue(self, value, callBack=1): set current value of text control", "prompted_full_text": "Implement the Python class `MultipleFileBrowseButton` described below.\n\nClass description:\nImplement the MultipleFileBrowseButton class.\n\nMethod signatures and docstrings:\n- def OnBrowse(self, event=None): Going to browse for file...\n- def SetValue(self, value, callBack=1): set current value of text control\n\n<|skeleton|>\nclass MultipleFileBrowseButton:\n\n def OnBrowse(self, event=None):\n \"\"\"Going to browse for file...\"\"\"\n <|body_0|>\n\n def SetValue(self, value, callBack=1):\n \"\"\"set current value of text control\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n current = self.GetValue()\n directory = os.path.split(current)\n if os.path.isdir(current):\n directory = current\n current = ''\n elif directory and os.path.isdir(directory[0]):\n current = directory[1]\n directory = directory[0]\n else:\n directory = self.startDirectory\n current = ''\n dlg = wx.FileDialog(self, self.dialogTitle, directory, current, self.fileMask, self.fileMode)\n if dlg.ShowModal() == wx.ID_OK:\n self.SetValue(dlg.GetPaths())\n dlg.Destroy()\n<|end_body_0|>\n\n<|body_start_1|>\n save = self.callCallback\n self.callCallback = callBack\n self.textControl.SetValue(' '.join(['\"' + _ + '\"' for _ in value]))\n self.callCallback = save\n<|end_body_1|>\n", "revision_id": "465dbfaa94da1f068d9a64b552eac71ab10e17d9", "skeleton": "<|skeleton|>\nclass MultipleFileBrowseButton:\n\n def OnBrowse(self, event=None):\n \"\"\"Going to browse for file...\"\"\"\n <|body_0|>\n\n def SetValue(self, value, callBack=1):\n \"\"\"set current value of text control\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MultipleFileBrowseButton:\n def OnBrowse(self, event=None):\n \"\"\"Going to browse for file...\"\"\"\n current = self.GetValue()\n directory = os.path.split(current)\n if os.path.isdir(current):\n directory = current\n current = ''\n elif directory and os.path.isdir(directory[0]):\n current = directory[1]\n directory = directory[0]\n else:\n directory = self.startDirectory\n current = ''\n dlg = wx.FileDialog(self, self.dialogTitle, directory, current, self.fileMask, self.fileMode)\n if dlg.ShowModal() == wx.ID_OK:\n self.SetValue(dlg.GetPaths())\n dlg.Destroy()\n\n def SetValue(self, value, callBack=1):\n \"\"\"set current value of text control\"\"\"\n save = self.callCallback\n self.callCallback = callBack\n self.textControl.SetValue(' '.join(['\"' + _ + '\"' for _ in value]))\n self.callCallback = save\n", "source": "the_stack_v2_python_sparse", "source_path": "md_davis/gui/residue_panel.py", "source_repo": "djmaity/md-davis", "split": "test", "star_events_count": 7} {"blob_id": "eada2109c0190049e11c95631815c18793b478b6", "bodies": ["batch = batch.to(self.device)\nnoisy_wavs, lens = batch.noisy_sig\nnoisy_feats = self.compute_feats(noisy_wavs)\nmask = self.modules.model(noisy_feats)\npredict_spec = torch.mul(mask, noisy_feats)\npredict_wav = self.hparams.resynth(torch.expm1(predict_spec), noisy_wavs)\nreturn {'spec': predict_spec, 'wav': predict_wav}", "feats = self.hparams.compute_STFT(wavs)\nfeats = sb.processing.features.spectral_magnitude(feats, power=0.5)\nfeats = torch.log1p(feats)\nreturn feats", "clean_wavs, lens = batch.clean_sig\nclean_spec = self.compute_feats(clean_wavs)\nloss = sb.nnet.losses.mse_loss(predictions['spec'], clean_spec, lens)\nself.loss_metric.append(batch.id, predictions['spec'], clean_spec, lens, reduction='batch')\nif stage != sb.Stage.TRAIN:\n self.stoi_metric.append(batch.id, predictions['wav'], clean_wavs, lens, reduction='batch')\nreturn loss", "self.loss_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.losses.mse_loss)\nif stage != sb.Stage.TRAIN:\n self.stoi_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.loss.stoi_loss.stoi_loss)", "if stage == sb.Stage.TRAIN:\n self.train_loss = stage_loss\nelse:\n stats = {'loss': stage_loss, 'stoi': -self.stoi_metric.summarize('average')}\nif stage == sb.Stage.VALID:\n self.hparams.train_logger.log_stats({'Epoch': epoch}, train_stats={'loss': self.train_loss}, valid_stats=stats)\n self.checkpointer.save_and_keep_only(meta=stats, max_keys=['stoi'])\nif stage == sb.Stage.TEST:\n self.hparams.train_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stats)"], "bodies_text": "<|body_start_0|>\n batch = batch.to(self.device)\n noisy_wavs, lens = batch.noisy_sig\n noisy_feats = self.compute_feats(noisy_wavs)\n mask = self.modules.model(noisy_feats)\n predict_spec = torch.mul(mask, noisy_feats)\n predict_wav = self.hparams.resynth(torch.expm1(predict_spec), noisy_wavs)\n return {'spec': predict_spec, 'wav': predict_wav}\n<|end_body_0|>\n\n<|body_start_1|>\n feats = self.hparams.compute_STFT(wavs)\n feats = sb.processing.features.spectral_magnitude(feats, power=0.5)\n feats = torch.log1p(feats)\n return feats\n<|end_body_1|>\n\n<|body_start_2|>\n clean_wavs, lens = batch.clean_sig\n clean_spec = self.compute_feats(clean_wavs)\n loss = sb.nnet.losses.mse_loss(predictions['spec'], clean_spec, lens)\n self.loss_metric.append(batch.id, predictions['spec'], clean_spec, lens, reduction='batch')\n if stage != sb.Stage.TRAIN:\n self.stoi_metric.append(batch.id, predictions['wav'], clean_wavs, lens, reduction='batch')\n return loss\n<|end_body_2|>\n\n<|body_start_3|>\n self.loss_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.losses.mse_loss)\n if stage != sb.Stage.TRAIN:\n self.stoi_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.loss.stoi_loss.stoi_loss)\n<|end_body_3|>\n\n<|body_start_4|>\n if stage == sb.Stage.TRAIN:\n self.train_loss = stage_loss\n else:\n stats = {'loss': stage_loss, 'stoi': -self.stoi_metric.summarize('average')}\n if stage == sb.Stage.VALID:\n self.hparams.train_logger.log_stats({'Epoch': epoch}, train_stats={'loss': self.train_loss}, valid_stats=stats)\n self.checkpointer.save_and_keep_only(meta=stats, max_keys=['stoi'])\n if stage == sb.Stage.TEST:\n self.hparams.train_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stats)\n<|end_body_4|>\n", "class_docstring": "Class that manages the training loop. See speechbrain.core.Brain.", "class_name": "SEBrain", "detected_licenses": ["Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later", "LicenseRef-scancode-other-permissive"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SEBrain:\n \"\"\"Class that manages the training loop. See speechbrain.core.Brain.\"\"\"\n\n def compute_forward(self, batch, stage):\n \"\"\"Apply masking to convert from noisy waveforms to enhanced signals. Arguments --------- batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- predictions : dict A dictionary with keys {\"spec\", \"wav\"} with predicted features.\"\"\"\n <|body_0|>\n\n def compute_feats(self, wavs):\n \"\"\"Returns corresponding log-spectral features of the input waveforms. Arguments --------- wavs : torch.Tensor The batch of waveforms to convert to log-spectral features.\"\"\"\n <|body_1|>\n\n def compute_objectives(self, predictions, batch, stage):\n \"\"\"Computes the loss given the predicted and targeted outputs. Arguments --------- predictions : dict The output dict from `compute_forward`. batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- loss : torch.Tensor A one-element tensor used for backpropagating the gradient.\"\"\"\n <|body_2|>\n\n def on_stage_start(self, stage, epoch=None):\n \"\"\"Gets called at the beginning of each epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n <|body_3|>\n\n def on_stage_end(self, stage, stage_loss, epoch=None):\n \"\"\"Gets called at the end of an epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST stage_loss : float The average loss for all of the data processed in this stage. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n batch = batch.to(self.device)\n noisy_wavs, lens = batch.noisy_sig\n noisy_feats = self.compute_feats(noisy_wavs)\n mask = self.modules.model(noisy_feats)\n predict_spec = torch.mul(mask, noisy_feats)\n predict_wav = self.hparams.resynth(torch.expm1(predict_spec), noisy_wavs)\n return {'spec': predict_spec, 'wav': predict_wav}\n<|end_body_0|>\n\n<|body_start_1|>\n feats = self.hparams.compute_STFT(wavs)\n feats = sb.processing.features.spectral_magnitude(feats, power=0.5)\n feats = torch.log1p(feats)\n return feats\n<|end_body_1|>\n\n<|body_start_2|>\n clean_wavs, lens = batch.clean_sig\n clean_spec = self.compute_feats(clean_wavs)\n loss = sb.nnet.losses.mse_loss(predictions['spec'], clean_spec, lens)\n self.loss_metric.append(batch.id, predictions['spec'], clean_spec, lens, reduction='batch')\n if stage != sb.Stage.TRAIN:\n self.stoi_metric.append(batch.id, predictions['wav'], clean_wavs, lens, reduction='batch')\n return loss\n<|end_body_2|>\n\n<|body_start_3|>\n self.loss_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.losses.mse_loss)\n if stage != sb.Stage.TRAIN:\n self.stoi_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.loss.stoi_loss.stoi_loss)\n<|end_body_3|>\n\n<|body_start_4|>\n if stage == sb.Stage.TRAIN:\n self.train_loss = stage_loss\n else:\n stats = {'loss': stage_loss, 'stoi': -self.stoi_metric.summarize('average')}\n if stage == sb.Stage.VALID:\n self.hparams.train_logger.log_stats({'Epoch': epoch}, train_stats={'loss': self.train_loss}, valid_stats=stats)\n self.checkpointer.save_and_keep_only(meta=stats, max_keys=['stoi'])\n if stage == sb.Stage.TEST:\n self.hparams.train_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stats)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000392", "length_bytes": 11127, "license_type": "permissive", "methods": [{"docstring": "Apply masking to convert from noisy waveforms to enhanced signals. Arguments --------- batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- predictions : dict A dictionary with keys {\"spec\", \"wav\"} with predicted features.", "name": "compute_forward", "signature": "def compute_forward(self, batch, stage)"}, {"docstring": "Returns corresponding log-spectral features of the input waveforms. Arguments --------- wavs : torch.Tensor The batch of waveforms to convert to log-spectral features.", "name": "compute_feats", "signature": "def compute_feats(self, wavs)"}, {"docstring": "Computes the loss given the predicted and targeted outputs. Arguments --------- predictions : dict The output dict from `compute_forward`. batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- loss : torch.Tensor A one-element tensor used for backpropagating the gradient.", "name": "compute_objectives", "signature": "def compute_objectives(self, predictions, batch, stage)"}, {"docstring": "Gets called at the beginning of each epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. epoch : int The currently-starting epoch. This is passed `None` during the test stage.", "name": "on_stage_start", "signature": "def on_stage_start(self, stage, epoch=None)"}, {"docstring": "Gets called at the end of an epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST stage_loss : float The average loss for all of the data processed in this stage. epoch : int The currently-starting epoch. This is passed `None` during the test stage.", "name": "on_stage_end", "signature": "def on_stage_end(self, stage, stage_loss, epoch=None)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_014371", "prompt": "Implement the Python class `SEBrain` described below.\n\nClass description:\nClass that manages the training loop. See speechbrain.core.Brain.\n\nMethod signatures and docstrings:\n- def compute_forward(self, batch, stage): Apply masking to convert from noisy waveforms to enhanced signals. Arguments --------- batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- predictions : dict A dictionary with keys {\"spec\", \"wav\"} with predicted features.\n- def compute_feats(self, wavs): Returns corresponding log-spectral features of the input waveforms. Arguments --------- wavs : torch.Tensor The batch of waveforms to convert to log-spectral features.\n- def compute_objectives(self, predictions, batch, stage): Computes the loss given the predicted and targeted outputs. Arguments --------- predictions : dict The output dict from `compute_forward`. batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- loss : torch.Tensor A one-element tensor used for backpropagating the gradient.\n- def on_stage_start(self, stage, epoch=None): Gets called at the beginning of each epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\n- def on_stage_end(self, stage, stage_loss, epoch=None): Gets called at the end of an epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST stage_loss : float The average loss for all of the data processed in this stage. epoch : int The currently-starting epoch. This is passed `None` during the test stage.", "prompted_full_text": "Implement the Python class `SEBrain` described below.\n\nClass description:\nClass that manages the training loop. See speechbrain.core.Brain.\n\nMethod signatures and docstrings:\n- def compute_forward(self, batch, stage): Apply masking to convert from noisy waveforms to enhanced signals. Arguments --------- batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- predictions : dict A dictionary with keys {\"spec\", \"wav\"} with predicted features.\n- def compute_feats(self, wavs): Returns corresponding log-spectral features of the input waveforms. Arguments --------- wavs : torch.Tensor The batch of waveforms to convert to log-spectral features.\n- def compute_objectives(self, predictions, batch, stage): Computes the loss given the predicted and targeted outputs. Arguments --------- predictions : dict The output dict from `compute_forward`. batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- loss : torch.Tensor A one-element tensor used for backpropagating the gradient.\n- def on_stage_start(self, stage, epoch=None): Gets called at the beginning of each epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\n- def on_stage_end(self, stage, stage_loss, epoch=None): Gets called at the end of an epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST stage_loss : float The average loss for all of the data processed in this stage. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\n\n<|skeleton|>\nclass SEBrain:\n \"\"\"Class that manages the training loop. See speechbrain.core.Brain.\"\"\"\n\n def compute_forward(self, batch, stage):\n \"\"\"Apply masking to convert from noisy waveforms to enhanced signals. Arguments --------- batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- predictions : dict A dictionary with keys {\"spec\", \"wav\"} with predicted features.\"\"\"\n <|body_0|>\n\n def compute_feats(self, wavs):\n \"\"\"Returns corresponding log-spectral features of the input waveforms. Arguments --------- wavs : torch.Tensor The batch of waveforms to convert to log-spectral features.\"\"\"\n <|body_1|>\n\n def compute_objectives(self, predictions, batch, stage):\n \"\"\"Computes the loss given the predicted and targeted outputs. Arguments --------- predictions : dict The output dict from `compute_forward`. batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- loss : torch.Tensor A one-element tensor used for backpropagating the gradient.\"\"\"\n <|body_2|>\n\n def on_stage_start(self, stage, epoch=None):\n \"\"\"Gets called at the beginning of each epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n <|body_3|>\n\n def on_stage_end(self, stage, stage_loss, epoch=None):\n \"\"\"Gets called at the end of an epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST stage_loss : float The average loss for all of the data processed in this stage. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n batch = batch.to(self.device)\n noisy_wavs, lens = batch.noisy_sig\n noisy_feats = self.compute_feats(noisy_wavs)\n mask = self.modules.model(noisy_feats)\n predict_spec = torch.mul(mask, noisy_feats)\n predict_wav = self.hparams.resynth(torch.expm1(predict_spec), noisy_wavs)\n return {'spec': predict_spec, 'wav': predict_wav}\n<|end_body_0|>\n\n<|body_start_1|>\n feats = self.hparams.compute_STFT(wavs)\n feats = sb.processing.features.spectral_magnitude(feats, power=0.5)\n feats = torch.log1p(feats)\n return feats\n<|end_body_1|>\n\n<|body_start_2|>\n clean_wavs, lens = batch.clean_sig\n clean_spec = self.compute_feats(clean_wavs)\n loss = sb.nnet.losses.mse_loss(predictions['spec'], clean_spec, lens)\n self.loss_metric.append(batch.id, predictions['spec'], clean_spec, lens, reduction='batch')\n if stage != sb.Stage.TRAIN:\n self.stoi_metric.append(batch.id, predictions['wav'], clean_wavs, lens, reduction='batch')\n return loss\n<|end_body_2|>\n\n<|body_start_3|>\n self.loss_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.losses.mse_loss)\n if stage != sb.Stage.TRAIN:\n self.stoi_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.loss.stoi_loss.stoi_loss)\n<|end_body_3|>\n\n<|body_start_4|>\n if stage == sb.Stage.TRAIN:\n self.train_loss = stage_loss\n else:\n stats = {'loss': stage_loss, 'stoi': -self.stoi_metric.summarize('average')}\n if stage == sb.Stage.VALID:\n self.hparams.train_logger.log_stats({'Epoch': epoch}, train_stats={'loss': self.train_loss}, valid_stats=stats)\n self.checkpointer.save_and_keep_only(meta=stats, max_keys=['stoi'])\n if stage == sb.Stage.TEST:\n self.hparams.train_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stats)\n<|end_body_4|>\n", "revision_id": "92acc188d3a0f634de58463b6676e70df83ef808", "skeleton": "<|skeleton|>\nclass SEBrain:\n \"\"\"Class that manages the training loop. See speechbrain.core.Brain.\"\"\"\n\n def compute_forward(self, batch, stage):\n \"\"\"Apply masking to convert from noisy waveforms to enhanced signals. Arguments --------- batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- predictions : dict A dictionary with keys {\"spec\", \"wav\"} with predicted features.\"\"\"\n <|body_0|>\n\n def compute_feats(self, wavs):\n \"\"\"Returns corresponding log-spectral features of the input waveforms. Arguments --------- wavs : torch.Tensor The batch of waveforms to convert to log-spectral features.\"\"\"\n <|body_1|>\n\n def compute_objectives(self, predictions, batch, stage):\n \"\"\"Computes the loss given the predicted and targeted outputs. Arguments --------- predictions : dict The output dict from `compute_forward`. batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- loss : torch.Tensor A one-element tensor used for backpropagating the gradient.\"\"\"\n <|body_2|>\n\n def on_stage_start(self, stage, epoch=None):\n \"\"\"Gets called at the beginning of each epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n <|body_3|>\n\n def on_stage_end(self, stage, stage_loss, epoch=None):\n \"\"\"Gets called at the end of an epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST stage_loss : float The average loss for all of the data processed in this stage. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SEBrain:\n \"\"\"Class that manages the training loop. See speechbrain.core.Brain.\"\"\"\n\n def compute_forward(self, batch, stage):\n \"\"\"Apply masking to convert from noisy waveforms to enhanced signals. Arguments --------- batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- predictions : dict A dictionary with keys {\"spec\", \"wav\"} with predicted features.\"\"\"\n batch = batch.to(self.device)\n noisy_wavs, lens = batch.noisy_sig\n noisy_feats = self.compute_feats(noisy_wavs)\n mask = self.modules.model(noisy_feats)\n predict_spec = torch.mul(mask, noisy_feats)\n predict_wav = self.hparams.resynth(torch.expm1(predict_spec), noisy_wavs)\n return {'spec': predict_spec, 'wav': predict_wav}\n\n def compute_feats(self, wavs):\n \"\"\"Returns corresponding log-spectral features of the input waveforms. Arguments --------- wavs : torch.Tensor The batch of waveforms to convert to log-spectral features.\"\"\"\n feats = self.hparams.compute_STFT(wavs)\n feats = sb.processing.features.spectral_magnitude(feats, power=0.5)\n feats = torch.log1p(feats)\n return feats\n\n def compute_objectives(self, predictions, batch, stage):\n \"\"\"Computes the loss given the predicted and targeted outputs. Arguments --------- predictions : dict The output dict from `compute_forward`. batch : PaddedBatch This batch object contains all the relevant tensors for computation. stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. Returns ------- loss : torch.Tensor A one-element tensor used for backpropagating the gradient.\"\"\"\n clean_wavs, lens = batch.clean_sig\n clean_spec = self.compute_feats(clean_wavs)\n loss = sb.nnet.losses.mse_loss(predictions['spec'], clean_spec, lens)\n self.loss_metric.append(batch.id, predictions['spec'], clean_spec, lens, reduction='batch')\n if stage != sb.Stage.TRAIN:\n self.stoi_metric.append(batch.id, predictions['wav'], clean_wavs, lens, reduction='batch')\n return loss\n\n def on_stage_start(self, stage, epoch=None):\n \"\"\"Gets called at the beginning of each epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, or sb.Stage.TEST. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n self.loss_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.losses.mse_loss)\n if stage != sb.Stage.TRAIN:\n self.stoi_metric = sb.utils.metric_stats.MetricStats(metric=sb.nnet.loss.stoi_loss.stoi_loss)\n\n def on_stage_end(self, stage, stage_loss, epoch=None):\n \"\"\"Gets called at the end of an epoch. Arguments --------- stage : sb.Stage One of sb.Stage.TRAIN, sb.Stage.VALID, sb.Stage.TEST stage_loss : float The average loss for all of the data processed in this stage. epoch : int The currently-starting epoch. This is passed `None` during the test stage.\"\"\"\n if stage == sb.Stage.TRAIN:\n self.train_loss = stage_loss\n else:\n stats = {'loss': stage_loss, 'stoi': -self.stoi_metric.summarize('average')}\n if stage == sb.Stage.VALID:\n self.hparams.train_logger.log_stats({'Epoch': epoch}, train_stats={'loss': self.train_loss}, valid_stats=stats)\n self.checkpointer.save_and_keep_only(meta=stats, max_keys=['stoi'])\n if stage == sb.Stage.TEST:\n self.hparams.train_logger.log_stats({'Epoch loaded': self.hparams.epoch_counter.current}, test_stats=stats)\n", "source": "the_stack_v2_python_sparse", "source_path": "PyTorch/dev/perf/speechbrain-tdnn/templates/enhancement/train.py", "source_repo": "Ascend/ModelZoo-PyTorch", "split": "test", "star_events_count": 23} {"blob_id": "4bc4453d2fe97b44145676ea5811d538a6d710a2", "bodies": ["if not value:\n return None\nreturn value.isoformat()", "if not value:\n return None\nreturn parse(value)"], "bodies_text": "<|body_start_0|>\n if not value:\n return None\n return value.isoformat()\n<|end_body_0|>\n\n<|body_start_1|>\n if not value:\n return None\n return parse(value)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "FuzzyDate", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FuzzyDate:\n\n def _serialize(self, value, attr, obj):\n \"\"\"Convert a Python object into an outside-world object\"\"\"\n <|body_0|>\n\n def _deserialize(self, value, attr, obj):\n \"\"\"Convert a outside-world value into a Python object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not value:\n return None\n return value.isoformat()\n<|end_body_0|>\n\n<|body_start_1|>\n if not value:\n return None\n return parse(value)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000393", "length_bytes": 2881, "license_type": "permissive", "methods": [{"docstring": "Convert a Python object into an outside-world object", "name": "_serialize", "signature": "def _serialize(self, value, attr, obj)"}, {"docstring": "Convert a outside-world value into a Python object", "name": "_deserialize", "signature": "def _deserialize(self, value, attr, obj)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006175", "prompt": "Implement the Python class `FuzzyDate` described below.\n\nClass description:\nImplement the FuzzyDate class.\n\nMethod signatures and docstrings:\n- def _serialize(self, value, attr, obj): Convert a Python object into an outside-world object\n- def _deserialize(self, value, attr, obj): Convert a outside-world value into a Python object", "prompted_full_text": "Implement the Python class `FuzzyDate` described below.\n\nClass description:\nImplement the FuzzyDate class.\n\nMethod signatures and docstrings:\n- def _serialize(self, value, attr, obj): Convert a Python object into an outside-world object\n- def _deserialize(self, value, attr, obj): Convert a outside-world value into a Python object\n\n<|skeleton|>\nclass FuzzyDate:\n\n def _serialize(self, value, attr, obj):\n \"\"\"Convert a Python object into an outside-world object\"\"\"\n <|body_0|>\n\n def _deserialize(self, value, attr, obj):\n \"\"\"Convert a outside-world value into a Python object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not value:\n return None\n return value.isoformat()\n<|end_body_0|>\n\n<|body_start_1|>\n if not value:\n return None\n return parse(value)\n<|end_body_1|>\n", "revision_id": "87c211fa6cf9708bdf3fc4b736f3cca450c0a290", "skeleton": "<|skeleton|>\nclass FuzzyDate:\n\n def _serialize(self, value, attr, obj):\n \"\"\"Convert a Python object into an outside-world object\"\"\"\n <|body_0|>\n\n def _deserialize(self, value, attr, obj):\n \"\"\"Convert a outside-world value into a Python object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FuzzyDate:\n def _serialize(self, value, attr, obj):\n \"\"\"Convert a Python object into an outside-world object\"\"\"\n if not value:\n return None\n return value.isoformat()\n\n def _deserialize(self, value, attr, obj):\n \"\"\"Convert a outside-world value into a Python object\"\"\"\n if not value:\n return None\n return parse(value)\n", "source": "the_stack_v2_python_sparse", "source_path": "powonline/schema.py", "source_repo": "exhuma/powonline", "split": "test", "star_events_count": 0} {"blob_id": "98c95d12a1a5052ddb2cd68071ec561f279da5f9", "bodies": ["pivot = 0\nmaxL = A[0]\nmax_last = 0\nfor i, num in enumerate(A):\n max_last = max(max_last, num)\n if num < maxL:\n pivot = i\n maxL = max_last\nreturn pivot + 1", "info = sorted(([num, i] for i, num in enumerate(A)))\ns = 0\nfor index, (num, i) in enumerate(info):\n s += i\n if s == (index + 1) * index / 2:\n return index + 1"], "bodies_text": "<|body_start_0|>\n pivot = 0\n maxL = A[0]\n max_last = 0\n for i, num in enumerate(A):\n max_last = max(max_last, num)\n if num < maxL:\n pivot = i\n maxL = max_last\n return pivot + 1\n<|end_body_0|>\n\n<|body_start_1|>\n info = sorted(([num, i] for i, num in enumerate(A)))\n s = 0\n for index, (num, i) in enumerate(info):\n s += i\n if s == (index + 1) * index / 2:\n return index + 1\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def partitionDisjoint(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def partitionDisjoint2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pivot = 0\n maxL = A[0]\n max_last = 0\n for i, num in enumerate(A):\n max_last = max(max_last, num)\n if num < maxL:\n pivot = i\n maxL = max_last\n return pivot + 1\n<|end_body_0|>\n\n<|body_start_1|>\n info = sorted(([num, i] for i, num in enumerate(A)))\n s = 0\n for index, (num, i) in enumerate(info):\n s += i\n if s == (index + 1) * index / 2:\n return index + 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000394", "length_bytes": 959, "license_type": "no_license", "methods": [{"docstring": ":type A: List[int] :rtype: int", "name": "partitionDisjoint", "signature": "def partitionDisjoint(self, A)"}, {"docstring": ":type A: List[int] :rtype: int", "name": "partitionDisjoint2", "signature": "def partitionDisjoint2(self, A)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_011915", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def partitionDisjoint(self, A): :type A: List[int] :rtype: int\n- def partitionDisjoint2(self, A): :type A: List[int] :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def partitionDisjoint(self, A): :type A: List[int] :rtype: int\n- def partitionDisjoint2(self, A): :type A: List[int] :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def partitionDisjoint(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def partitionDisjoint2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pivot = 0\n maxL = A[0]\n max_last = 0\n for i, num in enumerate(A):\n max_last = max(max_last, num)\n if num < maxL:\n pivot = i\n maxL = max_last\n return pivot + 1\n<|end_body_0|>\n\n<|body_start_1|>\n info = sorted(([num, i] for i, num in enumerate(A)))\n s = 0\n for index, (num, i) in enumerate(info):\n s += i\n if s == (index + 1) * index / 2:\n return index + 1\n<|end_body_1|>\n", "revision_id": "70bdd75b6af2e1811c1beab22050c01d28d7373e", "skeleton": "<|skeleton|>\nclass Solution:\n\n def partitionDisjoint(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_0|>\n\n def partitionDisjoint2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def partitionDisjoint(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n pivot = 0\n maxL = A[0]\n max_last = 0\n for i, num in enumerate(A):\n max_last = max(max_last, num)\n if num < maxL:\n pivot = i\n maxL = max_last\n return pivot + 1\n\n def partitionDisjoint2(self, A):\n \"\"\":type A: List[int] :rtype: int\"\"\"\n info = sorted(([num, i] for i, num in enumerate(A)))\n s = 0\n for index, (num, i) in enumerate(info):\n s += i\n if s == (index + 1) * index / 2:\n return index + 1\n", "source": "the_stack_v2_python_sparse", "source_path": "python/leetcode/915_Partition_Array_into_Disjoint_Intervals.py", "source_repo": "bobcaoge/my-code", "split": "test", "star_events_count": 0} {"blob_id": "ba99a4218b8a6acee049fd3c2e9ef71794961369", "bodies": ["self._data_set = data_set\nself._missing_data_strategy = missing_data_strategy\nself._tz_aware = isinstance(self._data_set.index[0], dt.datetime) and self._data_set.index[0].tzinfo is not None\nif self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set.interpolate()\nelif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set.ffill()", "if isinstance(state, Iterable):\n return [self.get_data(i) for i in state]\nif self._tz_aware and (state.tzinfo is None or state.tzinfo.utcoffset(state) is None):\n state = pytz.utc.localize(state)\nif pd.Timestamp(state) in self._data_set:\n return self._data_set[pd.Timestamp(state)]\nelif state in self._data_set or self._missing_data_strategy == MissingDataStrategy.fail:\n return self._data_set[state]\nelse:\n if isinstance(self._data_set.index, pd.DatetimeIndex):\n self._data_set.at[pd.to_datetime(state)] = np.nan\n self._data_set.sort_index(inplace=True)\n else:\n self._data_set.at[state] = np.nan\n self._data_set.sort_index()\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set = self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set = self._data_set.ffill()\n else:\n raise RuntimeError(f'unrecognised missing data strategy: {str(self._missing_data_strategy)}')\n return self._data_set[pd.to_datetime(state)] if isinstance(self._data_set.index, pd.DatetimeIndex) else self._data_set[state]", "if isinstance(end, int):\n return self._data_set.loc[self._data_set.index < start].tail(end)\nreturn self._data_set.loc[(start < self._data_set.index) & (self._data_set.index <= end)]"], "bodies_text": "<|body_start_0|>\n self._data_set = data_set\n self._missing_data_strategy = missing_data_strategy\n self._tz_aware = isinstance(self._data_set.index[0], dt.datetime) and self._data_set.index[0].tzinfo is not None\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set.ffill()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(state, Iterable):\n return [self.get_data(i) for i in state]\n if self._tz_aware and (state.tzinfo is None or state.tzinfo.utcoffset(state) is None):\n state = pytz.utc.localize(state)\n if pd.Timestamp(state) in self._data_set:\n return self._data_set[pd.Timestamp(state)]\n elif state in self._data_set or self._missing_data_strategy == MissingDataStrategy.fail:\n return self._data_set[state]\n else:\n if isinstance(self._data_set.index, pd.DatetimeIndex):\n self._data_set.at[pd.to_datetime(state)] = np.nan\n self._data_set.sort_index(inplace=True)\n else:\n self._data_set.at[state] = np.nan\n self._data_set.sort_index()\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set = self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set = self._data_set.ffill()\n else:\n raise RuntimeError(f'unrecognised missing data strategy: {str(self._missing_data_strategy)}')\n return self._data_set[pd.to_datetime(state)] if isinstance(self._data_set.index, pd.DatetimeIndex) else self._data_set[state]\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(end, int):\n return self._data_set.loc[self._data_set.index < start].tail(end)\n return self._data_set.loc[(start < self._data_set.index) & (self._data_set.index <= end)]\n<|end_body_2|>\n", "class_docstring": "", "class_name": "GenericDataSource", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass GenericDataSource:\n\n def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy=MissingDataStrategy.fail):\n \"\"\"A data source which holds a pandas series indexed by date or datetime :param data_set: a pandas dataframe indexed by date or datetime :param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take effect if using get_data, gat_data_range has no expectations of the number of expected data points.\"\"\"\n <|body_0|>\n\n def get_data(self, state: Union[dt.date, dt.datetime, Iterable]):\n \"\"\"Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value :param state: a date, datetime or a list of dates or datetimes :return: float value\"\"\"\n <|body_1|>\n\n def get_data_range(self, start: Union[dt.date, dt.datetime], end: Union[dt.date, dt.datetime, int]):\n \"\"\"get a range of values from the dataset. :param start: a date or datetime :param end: a date, datetime or an int. If an int is provided we return that many data points back from the start date :return: pd.Series\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._data_set = data_set\n self._missing_data_strategy = missing_data_strategy\n self._tz_aware = isinstance(self._data_set.index[0], dt.datetime) and self._data_set.index[0].tzinfo is not None\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set.ffill()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(state, Iterable):\n return [self.get_data(i) for i in state]\n if self._tz_aware and (state.tzinfo is None or state.tzinfo.utcoffset(state) is None):\n state = pytz.utc.localize(state)\n if pd.Timestamp(state) in self._data_set:\n return self._data_set[pd.Timestamp(state)]\n elif state in self._data_set or self._missing_data_strategy == MissingDataStrategy.fail:\n return self._data_set[state]\n else:\n if isinstance(self._data_set.index, pd.DatetimeIndex):\n self._data_set.at[pd.to_datetime(state)] = np.nan\n self._data_set.sort_index(inplace=True)\n else:\n self._data_set.at[state] = np.nan\n self._data_set.sort_index()\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set = self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set = self._data_set.ffill()\n else:\n raise RuntimeError(f'unrecognised missing data strategy: {str(self._missing_data_strategy)}')\n return self._data_set[pd.to_datetime(state)] if isinstance(self._data_set.index, pd.DatetimeIndex) else self._data_set[state]\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(end, int):\n return self._data_set.loc[self._data_set.index < start].tail(end)\n return self._data_set.loc[(start < self._data_set.index) & (self._data_set.index <= end)]\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000395", "length_bytes": 6958, "license_type": "permissive", "methods": [{"docstring": "A data source which holds a pandas series indexed by date or datetime :param data_set: a pandas dataframe indexed by date or datetime :param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take effect if using get_data, gat_data_range has no expectations of the number of expected data points.", "name": "__init__", "signature": "def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy=MissingDataStrategy.fail)"}, {"docstring": "Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value :param state: a date, datetime or a list of dates or datetimes :return: float value", "name": "get_data", "signature": "def get_data(self, state: Union[dt.date, dt.datetime, Iterable])"}, {"docstring": "get a range of values from the dataset. :param start: a date or datetime :param end: a date, datetime or an int. If an int is provided we return that many data points back from the start date :return: pd.Series", "name": "get_data_range", "signature": "def get_data_range(self, start: Union[dt.date, dt.datetime], end: Union[dt.date, dt.datetime, int])"}], "n_methods": 3, "prompt": "Implement the Python class `GenericDataSource` described below.\n\nClass description:\nImplement the GenericDataSource class.\n\nMethod signatures and docstrings:\n- def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy=MissingDataStrategy.fail): A data source which holds a pandas series indexed by date or datetime :param data_set: a pandas dataframe indexed by date or datetime :param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take effect if using get_data, gat_data_range has no expectations of the number of expected data points.\n- def get_data(self, state: Union[dt.date, dt.datetime, Iterable]): Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value :param state: a date, datetime or a list of dates or datetimes :return: float value\n- def get_data_range(self, start: Union[dt.date, dt.datetime], end: Union[dt.date, dt.datetime, int]): get a range of values from the dataset. :param start: a date or datetime :param end: a date, datetime or an int. If an int is provided we return that many data points back from the start date :return: pd.Series", "prompted_full_text": "Implement the Python class `GenericDataSource` described below.\n\nClass description:\nImplement the GenericDataSource class.\n\nMethod signatures and docstrings:\n- def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy=MissingDataStrategy.fail): A data source which holds a pandas series indexed by date or datetime :param data_set: a pandas dataframe indexed by date or datetime :param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take effect if using get_data, gat_data_range has no expectations of the number of expected data points.\n- def get_data(self, state: Union[dt.date, dt.datetime, Iterable]): Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value :param state: a date, datetime or a list of dates or datetimes :return: float value\n- def get_data_range(self, start: Union[dt.date, dt.datetime], end: Union[dt.date, dt.datetime, int]): get a range of values from the dataset. :param start: a date or datetime :param end: a date, datetime or an int. If an int is provided we return that many data points back from the start date :return: pd.Series\n\n<|skeleton|>\nclass GenericDataSource:\n\n def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy=MissingDataStrategy.fail):\n \"\"\"A data source which holds a pandas series indexed by date or datetime :param data_set: a pandas dataframe indexed by date or datetime :param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take effect if using get_data, gat_data_range has no expectations of the number of expected data points.\"\"\"\n <|body_0|>\n\n def get_data(self, state: Union[dt.date, dt.datetime, Iterable]):\n \"\"\"Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value :param state: a date, datetime or a list of dates or datetimes :return: float value\"\"\"\n <|body_1|>\n\n def get_data_range(self, start: Union[dt.date, dt.datetime], end: Union[dt.date, dt.datetime, int]):\n \"\"\"get a range of values from the dataset. :param start: a date or datetime :param end: a date, datetime or an int. If an int is provided we return that many data points back from the start date :return: pd.Series\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._data_set = data_set\n self._missing_data_strategy = missing_data_strategy\n self._tz_aware = isinstance(self._data_set.index[0], dt.datetime) and self._data_set.index[0].tzinfo is not None\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set.ffill()\n<|end_body_0|>\n\n<|body_start_1|>\n if isinstance(state, Iterable):\n return [self.get_data(i) for i in state]\n if self._tz_aware and (state.tzinfo is None or state.tzinfo.utcoffset(state) is None):\n state = pytz.utc.localize(state)\n if pd.Timestamp(state) in self._data_set:\n return self._data_set[pd.Timestamp(state)]\n elif state in self._data_set or self._missing_data_strategy == MissingDataStrategy.fail:\n return self._data_set[state]\n else:\n if isinstance(self._data_set.index, pd.DatetimeIndex):\n self._data_set.at[pd.to_datetime(state)] = np.nan\n self._data_set.sort_index(inplace=True)\n else:\n self._data_set.at[state] = np.nan\n self._data_set.sort_index()\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set = self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set = self._data_set.ffill()\n else:\n raise RuntimeError(f'unrecognised missing data strategy: {str(self._missing_data_strategy)}')\n return self._data_set[pd.to_datetime(state)] if isinstance(self._data_set.index, pd.DatetimeIndex) else self._data_set[state]\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(end, int):\n return self._data_set.loc[self._data_set.index < start].tail(end)\n return self._data_set.loc[(start < self._data_set.index) & (self._data_set.index <= end)]\n<|end_body_2|>\n", "revision_id": "4cf8ec75c4d85b16ec08371c46cc1a9ede9d72a2", "skeleton": "<|skeleton|>\nclass GenericDataSource:\n\n def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy=MissingDataStrategy.fail):\n \"\"\"A data source which holds a pandas series indexed by date or datetime :param data_set: a pandas dataframe indexed by date or datetime :param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take effect if using get_data, gat_data_range has no expectations of the number of expected data points.\"\"\"\n <|body_0|>\n\n def get_data(self, state: Union[dt.date, dt.datetime, Iterable]):\n \"\"\"Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value :param state: a date, datetime or a list of dates or datetimes :return: float value\"\"\"\n <|body_1|>\n\n def get_data_range(self, start: Union[dt.date, dt.datetime], end: Union[dt.date, dt.datetime, int]):\n \"\"\"get a range of values from the dataset. :param start: a date or datetime :param end: a date, datetime or an int. If an int is provided we return that many data points back from the start date :return: pd.Series\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class GenericDataSource:\n def __init__(self, data_set: pd.Series, missing_data_strategy: MissingDataStrategy=MissingDataStrategy.fail):\n \"\"\"A data source which holds a pandas series indexed by date or datetime :param data_set: a pandas dataframe indexed by date or datetime :param missing_data_strategy: MissingDataStrategy which defines behaviour if data is missing, will only take effect if using get_data, gat_data_range has no expectations of the number of expected data points.\"\"\"\n self._data_set = data_set\n self._missing_data_strategy = missing_data_strategy\n self._tz_aware = isinstance(self._data_set.index[0], dt.datetime) and self._data_set.index[0].tzinfo is not None\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set.ffill()\n\n def get_data(self, state: Union[dt.date, dt.datetime, Iterable]):\n \"\"\"Get the value of the dataset at a time or date. If a list of dates or times is provided return the avg value :param state: a date, datetime or a list of dates or datetimes :return: float value\"\"\"\n if isinstance(state, Iterable):\n return [self.get_data(i) for i in state]\n if self._tz_aware and (state.tzinfo is None or state.tzinfo.utcoffset(state) is None):\n state = pytz.utc.localize(state)\n if pd.Timestamp(state) in self._data_set:\n return self._data_set[pd.Timestamp(state)]\n elif state in self._data_set or self._missing_data_strategy == MissingDataStrategy.fail:\n return self._data_set[state]\n else:\n if isinstance(self._data_set.index, pd.DatetimeIndex):\n self._data_set.at[pd.to_datetime(state)] = np.nan\n self._data_set.sort_index(inplace=True)\n else:\n self._data_set.at[state] = np.nan\n self._data_set.sort_index()\n if self._missing_data_strategy == MissingDataStrategy.interpolate:\n self._data_set = self._data_set.interpolate()\n elif self._missing_data_strategy == MissingDataStrategy.fill_forward:\n self._data_set = self._data_set.ffill()\n else:\n raise RuntimeError(f'unrecognised missing data strategy: {str(self._missing_data_strategy)}')\n return self._data_set[pd.to_datetime(state)] if isinstance(self._data_set.index, pd.DatetimeIndex) else self._data_set[state]\n\n def get_data_range(self, start: Union[dt.date, dt.datetime], end: Union[dt.date, dt.datetime, int]):\n \"\"\"get a range of values from the dataset. :param start: a date or datetime :param end: a date, datetime or an int. If an int is provided we return that many data points back from the start date :return: pd.Series\"\"\"\n if isinstance(end, int):\n return self._data_set.loc[self._data_set.index < start].tail(end)\n return self._data_set.loc[(start < self._data_set.index) & (self._data_set.index <= end)]\n", "source": "the_stack_v2_python_sparse", "source_path": "gs_quant/backtests/data_sources.py", "source_repo": "goldmansachs/gs-quant", "split": "test", "star_events_count": 2088} {"blob_id": "603144289256955b99edfe8b46f6e7ba00b0576c", "bodies": ["n = self.environment.action_space.n\ne = extra[0]\ninfo = extra[1]\ns = extra[2]\nk = max(0, e - 2 * s / n)\naccumulation = np.zeros(n)\nsummation = 0\nfor i in range(n):\n summation += info[i][1] + k\n accumulation[i] = summation\nif summation == 0:\n return self.environment.action_space.sample()\nrandom_number = self.generator.uniform(low=0, high=summation)\nfor i in range(n):\n if random_number <= accumulation[i]:\n return info[i][0]\nprint('Warning: agent_pql_exp._best_action: Selecting emergency action')\nreturn info[n - 1][0]", "n = self.environment.action_space.n\ne = extra[0]\ninfo = extra[1]\ns = extra[2]\nk = max(0, e - 2 * s / n)\ns2 = s + n * k\nacu = np.zeros(n)\nif s == 0:\n return self.environment.action_space.sample()\nns2 = s2 * n\nsummation = 0\nfor i in range(n):\n aux = 2 * s2 - n * (info[i][1] + k)\n summation += aux\n acu[i] = summation\nnum = self.generator.uniform(low=0, high=ns2)\nfor i in range(n):\n if num <= acu[i]:\n return info[i][0]\nprint('Warning: agent_pql_exp2._non_greedy_action: Selecting emergency action')\nreturn info[n - 1][0]"], "bodies_text": "<|body_start_0|>\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n accumulation = np.zeros(n)\n summation = 0\n for i in range(n):\n summation += info[i][1] + k\n accumulation[i] = summation\n if summation == 0:\n return self.environment.action_space.sample()\n random_number = self.generator.uniform(low=0, high=summation)\n for i in range(n):\n if random_number <= accumulation[i]:\n return info[i][0]\n print('Warning: agent_pql_exp._best_action: Selecting emergency action')\n return info[n - 1][0]\n<|end_body_0|>\n\n<|body_start_1|>\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n s2 = s + n * k\n acu = np.zeros(n)\n if s == 0:\n return self.environment.action_space.sample()\n ns2 = s2 * n\n summation = 0\n for i in range(n):\n aux = 2 * s2 - n * (info[i][1] + k)\n summation += aux\n acu[i] = summation\n num = self.generator.uniform(low=0, high=ns2)\n for i in range(n):\n if num <= acu[i]:\n return info[i][0]\n print('Warning: agent_pql_exp2._non_greedy_action: Selecting emergency action')\n return info[n - 1][0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AgentPQLEXP3", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AgentPQLEXP3:\n\n def _best_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action proportional to the credit indicated in train_data. If necessary, selection probabilities are smoothed. train_data is a tuple (maximum_credit, list of tuples: (action, credit), sum_credit) Let E be the maximum credit, N the number of actions, and S the sum of all credits. If E > 2S/N, then laplacian smoothing is carried out (otherwise, it is not always possible to calculate the :param extra: :param state: :return:\"\"\"\n <|body_0|>\n\n def _non_greedy_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action with probability inversely proportional to the credit indicated in extra. If necessary, probabilities are smoothed so that with epsilon = 0.5, a random walk is obtained. train_data is a tuple. The first element is the maximum credit, the second a list of tuples: (action, credit), and the third the sum of credits. :param state: :param extra: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n accumulation = np.zeros(n)\n summation = 0\n for i in range(n):\n summation += info[i][1] + k\n accumulation[i] = summation\n if summation == 0:\n return self.environment.action_space.sample()\n random_number = self.generator.uniform(low=0, high=summation)\n for i in range(n):\n if random_number <= accumulation[i]:\n return info[i][0]\n print('Warning: agent_pql_exp._best_action: Selecting emergency action')\n return info[n - 1][0]\n<|end_body_0|>\n\n<|body_start_1|>\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n s2 = s + n * k\n acu = np.zeros(n)\n if s == 0:\n return self.environment.action_space.sample()\n ns2 = s2 * n\n summation = 0\n for i in range(n):\n aux = 2 * s2 - n * (info[i][1] + k)\n summation += aux\n acu[i] = summation\n num = self.generator.uniform(low=0, high=ns2)\n for i in range(n):\n if num <= acu[i]:\n return info[i][0]\n print('Warning: agent_pql_exp2._non_greedy_action: Selecting emergency action')\n return info[n - 1][0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000396", "length_bytes": 3406, "license_type": "no_license", "methods": [{"docstring": "Select action proportional to the credit indicated in train_data. If necessary, selection probabilities are smoothed. train_data is a tuple (maximum_credit, list of tuples: (action, credit), sum_credit) Let E be the maximum credit, N the number of actions, and S the sum of all credits. If E > 2S/N, then laplacian smoothing is carried out (otherwise, it is not always possible to calculate the :param extra: :param state: :return:", "name": "_best_action", "signature": "def _best_action(self, state: object=None, extra: object=None) -> int"}, {"docstring": "Select action with probability inversely proportional to the credit indicated in extra. If necessary, probabilities are smoothed so that with epsilon = 0.5, a random walk is obtained. train_data is a tuple. The first element is the maximum credit, the second a list of tuples: (action, credit), and the third the sum of credits. :param state: :param extra: :return:", "name": "_non_greedy_action", "signature": "def _non_greedy_action(self, state: object=None, extra: object=None) -> int"}], "n_methods": 2, "prompt": "Implement the Python class `AgentPQLEXP3` described below.\n\nClass description:\nImplement the AgentPQLEXP3 class.\n\nMethod signatures and docstrings:\n- def _best_action(self, state: object=None, extra: object=None) -> int: Select action proportional to the credit indicated in train_data. If necessary, selection probabilities are smoothed. train_data is a tuple (maximum_credit, list of tuples: (action, credit), sum_credit) Let E be the maximum credit, N the number of actions, and S the sum of all credits. If E > 2S/N, then laplacian smoothing is carried out (otherwise, it is not always possible to calculate the :param extra: :param state: :return:\n- def _non_greedy_action(self, state: object=None, extra: object=None) -> int: Select action with probability inversely proportional to the credit indicated in extra. If necessary, probabilities are smoothed so that with epsilon = 0.5, a random walk is obtained. train_data is a tuple. The first element is the maximum credit, the second a list of tuples: (action, credit), and the third the sum of credits. :param state: :param extra: :return:", "prompted_full_text": "Implement the Python class `AgentPQLEXP3` described below.\n\nClass description:\nImplement the AgentPQLEXP3 class.\n\nMethod signatures and docstrings:\n- def _best_action(self, state: object=None, extra: object=None) -> int: Select action proportional to the credit indicated in train_data. If necessary, selection probabilities are smoothed. train_data is a tuple (maximum_credit, list of tuples: (action, credit), sum_credit) Let E be the maximum credit, N the number of actions, and S the sum of all credits. If E > 2S/N, then laplacian smoothing is carried out (otherwise, it is not always possible to calculate the :param extra: :param state: :return:\n- def _non_greedy_action(self, state: object=None, extra: object=None) -> int: Select action with probability inversely proportional to the credit indicated in extra. If necessary, probabilities are smoothed so that with epsilon = 0.5, a random walk is obtained. train_data is a tuple. The first element is the maximum credit, the second a list of tuples: (action, credit), and the third the sum of credits. :param state: :param extra: :return:\n\n<|skeleton|>\nclass AgentPQLEXP3:\n\n def _best_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action proportional to the credit indicated in train_data. If necessary, selection probabilities are smoothed. train_data is a tuple (maximum_credit, list of tuples: (action, credit), sum_credit) Let E be the maximum credit, N the number of actions, and S the sum of all credits. If E > 2S/N, then laplacian smoothing is carried out (otherwise, it is not always possible to calculate the :param extra: :param state: :return:\"\"\"\n <|body_0|>\n\n def _non_greedy_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action with probability inversely proportional to the credit indicated in extra. If necessary, probabilities are smoothed so that with epsilon = 0.5, a random walk is obtained. train_data is a tuple. The first element is the maximum credit, the second a list of tuples: (action, credit), and the third the sum of credits. :param state: :param extra: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n accumulation = np.zeros(n)\n summation = 0\n for i in range(n):\n summation += info[i][1] + k\n accumulation[i] = summation\n if summation == 0:\n return self.environment.action_space.sample()\n random_number = self.generator.uniform(low=0, high=summation)\n for i in range(n):\n if random_number <= accumulation[i]:\n return info[i][0]\n print('Warning: agent_pql_exp._best_action: Selecting emergency action')\n return info[n - 1][0]\n<|end_body_0|>\n\n<|body_start_1|>\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n s2 = s + n * k\n acu = np.zeros(n)\n if s == 0:\n return self.environment.action_space.sample()\n ns2 = s2 * n\n summation = 0\n for i in range(n):\n aux = 2 * s2 - n * (info[i][1] + k)\n summation += aux\n acu[i] = summation\n num = self.generator.uniform(low=0, high=ns2)\n for i in range(n):\n if num <= acu[i]:\n return info[i][0]\n print('Warning: agent_pql_exp2._non_greedy_action: Selecting emergency action')\n return info[n - 1][0]\n<|end_body_1|>\n", "revision_id": "b51c64c867e15356c9f978839fd0040182324edd", "skeleton": "<|skeleton|>\nclass AgentPQLEXP3:\n\n def _best_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action proportional to the credit indicated in train_data. If necessary, selection probabilities are smoothed. train_data is a tuple (maximum_credit, list of tuples: (action, credit), sum_credit) Let E be the maximum credit, N the number of actions, and S the sum of all credits. If E > 2S/N, then laplacian smoothing is carried out (otherwise, it is not always possible to calculate the :param extra: :param state: :return:\"\"\"\n <|body_0|>\n\n def _non_greedy_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action with probability inversely proportional to the credit indicated in extra. If necessary, probabilities are smoothed so that with epsilon = 0.5, a random walk is obtained. train_data is a tuple. The first element is the maximum credit, the second a list of tuples: (action, credit), and the third the sum of credits. :param state: :param extra: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AgentPQLEXP3:\n def _best_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action proportional to the credit indicated in train_data. If necessary, selection probabilities are smoothed. train_data is a tuple (maximum_credit, list of tuples: (action, credit), sum_credit) Let E be the maximum credit, N the number of actions, and S the sum of all credits. If E > 2S/N, then laplacian smoothing is carried out (otherwise, it is not always possible to calculate the :param extra: :param state: :return:\"\"\"\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n accumulation = np.zeros(n)\n summation = 0\n for i in range(n):\n summation += info[i][1] + k\n accumulation[i] = summation\n if summation == 0:\n return self.environment.action_space.sample()\n random_number = self.generator.uniform(low=0, high=summation)\n for i in range(n):\n if random_number <= accumulation[i]:\n return info[i][0]\n print('Warning: agent_pql_exp._best_action: Selecting emergency action')\n return info[n - 1][0]\n\n def _non_greedy_action(self, state: object=None, extra: object=None) -> int:\n \"\"\"Select action with probability inversely proportional to the credit indicated in extra. If necessary, probabilities are smoothed so that with epsilon = 0.5, a random walk is obtained. train_data is a tuple. The first element is the maximum credit, the second a list of tuples: (action, credit), and the third the sum of credits. :param state: :param extra: :return:\"\"\"\n n = self.environment.action_space.n\n e = extra[0]\n info = extra[1]\n s = extra[2]\n k = max(0, e - 2 * s / n)\n s2 = s + n * k\n acu = np.zeros(n)\n if s == 0:\n return self.environment.action_space.sample()\n ns2 = s2 * n\n summation = 0\n for i in range(n):\n aux = 2 * s2 - n * (info[i][1] + k)\n summation += aux\n acu[i] = summation\n num = self.generator.uniform(low=0, high=ns2)\n for i in range(n):\n if num <= acu[i]:\n return info[i][0]\n print('Warning: agent_pql_exp2._non_greedy_action: Selecting emergency action')\n return info[n - 1][0]\n", "source": "the_stack_v2_python_sparse", "source_path": "agents/agent_pql_exp3.py", "source_repo": "Pozas91/tiadas", "split": "test", "star_events_count": 1} {"blob_id": "1af32c233fa1289e2541dbf6607a6f358cef0859", "bodies": ["try:\n search = request.GET.get('search', '')\n if search:\n brands = self.get_filter_objects(BrandModel, brand=brand_id, model_name__icontains=search)\n else:\n brands = self.get_filter_objects(BrandModel, brand=brand_id)\n serializer = serializers.BrandModelSerializer(brands, many=True)\n return Utils.dispatch_success(request, serializer.data)\nexcept Exception as e:\n return self.internal_server_error(request, e)", "try:\n data = request.data\n data['brand'] = brand_id\n serializer = serializers.AddBrandModelSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Utils.dispatch_success(request, serializer.data)\n return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)\nexcept Exception as e:\n return self.internal_server_error(request, e)"], "bodies_text": "<|body_start_0|>\n try:\n search = request.GET.get('search', '')\n if search:\n brands = self.get_filter_objects(BrandModel, brand=brand_id, model_name__icontains=search)\n else:\n brands = self.get_filter_objects(BrandModel, brand=brand_id)\n serializer = serializers.BrandModelSerializer(brands, many=True)\n return Utils.dispatch_success(request, serializer.data)\n except Exception as e:\n return self.internal_server_error(request, e)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n data = request.data\n data['brand'] = brand_id\n serializer = serializers.AddBrandModelSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Utils.dispatch_success(request, serializer.data)\n return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)\n except Exception as e:\n return self.internal_server_error(request, e)\n<|end_body_1|>\n", "class_docstring": "Brand Model List and create Endpoint", "class_name": "BrandModelList", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BrandModelList:\n \"\"\"Brand Model List and create Endpoint\"\"\"\n\n def get(self, request, brand_id):\n \"\"\"Returnt the list of Models of particular brand :param request: :param brand_id: :return:\"\"\"\n <|body_0|>\n\n def post(self, request, brand_id):\n \"\"\"Creates a new brand model :param request: { \"model_name\" : \"Unicorn\" } :param brand_id: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n search = request.GET.get('search', '')\n if search:\n brands = self.get_filter_objects(BrandModel, brand=brand_id, model_name__icontains=search)\n else:\n brands = self.get_filter_objects(BrandModel, brand=brand_id)\n serializer = serializers.BrandModelSerializer(brands, many=True)\n return Utils.dispatch_success(request, serializer.data)\n except Exception as e:\n return self.internal_server_error(request, e)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n data = request.data\n data['brand'] = brand_id\n serializer = serializers.AddBrandModelSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Utils.dispatch_success(request, serializer.data)\n return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)\n except Exception as e:\n return self.internal_server_error(request, e)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000397", "length_bytes": 23745, "license_type": "permissive", "methods": [{"docstring": "Returnt the list of Models of particular brand :param request: :param brand_id: :return:", "name": "get", "signature": "def get(self, request, brand_id)"}, {"docstring": "Creates a new brand model :param request: { \"model_name\" : \"Unicorn\" } :param brand_id: :return:", "name": "post", "signature": "def post(self, request, brand_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_018131", "prompt": "Implement the Python class `BrandModelList` described below.\n\nClass description:\nBrand Model List and create Endpoint\n\nMethod signatures and docstrings:\n- def get(self, request, brand_id): Returnt the list of Models of particular brand :param request: :param brand_id: :return:\n- def post(self, request, brand_id): Creates a new brand model :param request: { \"model_name\" : \"Unicorn\" } :param brand_id: :return:", "prompted_full_text": "Implement the Python class `BrandModelList` described below.\n\nClass description:\nBrand Model List and create Endpoint\n\nMethod signatures and docstrings:\n- def get(self, request, brand_id): Returnt the list of Models of particular brand :param request: :param brand_id: :return:\n- def post(self, request, brand_id): Creates a new brand model :param request: { \"model_name\" : \"Unicorn\" } :param brand_id: :return:\n\n<|skeleton|>\nclass BrandModelList:\n \"\"\"Brand Model List and create Endpoint\"\"\"\n\n def get(self, request, brand_id):\n \"\"\"Returnt the list of Models of particular brand :param request: :param brand_id: :return:\"\"\"\n <|body_0|>\n\n def post(self, request, brand_id):\n \"\"\"Creates a new brand model :param request: { \"model_name\" : \"Unicorn\" } :param brand_id: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n search = request.GET.get('search', '')\n if search:\n brands = self.get_filter_objects(BrandModel, brand=brand_id, model_name__icontains=search)\n else:\n brands = self.get_filter_objects(BrandModel, brand=brand_id)\n serializer = serializers.BrandModelSerializer(brands, many=True)\n return Utils.dispatch_success(request, serializer.data)\n except Exception as e:\n return self.internal_server_error(request, e)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n data = request.data\n data['brand'] = brand_id\n serializer = serializers.AddBrandModelSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Utils.dispatch_success(request, serializer.data)\n return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)\n except Exception as e:\n return self.internal_server_error(request, e)\n<|end_body_1|>\n", "revision_id": "1e31affddf60d2de72445a85dd2055bdeba6f670", "skeleton": "<|skeleton|>\nclass BrandModelList:\n \"\"\"Brand Model List and create Endpoint\"\"\"\n\n def get(self, request, brand_id):\n \"\"\"Returnt the list of Models of particular brand :param request: :param brand_id: :return:\"\"\"\n <|body_0|>\n\n def post(self, request, brand_id):\n \"\"\"Creates a new brand model :param request: { \"model_name\" : \"Unicorn\" } :param brand_id: :return:\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BrandModelList:\n \"\"\"Brand Model List and create Endpoint\"\"\"\n\n def get(self, request, brand_id):\n \"\"\"Returnt the list of Models of particular brand :param request: :param brand_id: :return:\"\"\"\n try:\n search = request.GET.get('search', '')\n if search:\n brands = self.get_filter_objects(BrandModel, brand=brand_id, model_name__icontains=search)\n else:\n brands = self.get_filter_objects(BrandModel, brand=brand_id)\n serializer = serializers.BrandModelSerializer(brands, many=True)\n return Utils.dispatch_success(request, serializer.data)\n except Exception as e:\n return self.internal_server_error(request, e)\n\n def post(self, request, brand_id):\n \"\"\"Creates a new brand model :param request: { \"model_name\" : \"Unicorn\" } :param brand_id: :return:\"\"\"\n try:\n data = request.data\n data['brand'] = brand_id\n serializer = serializers.AddBrandModelSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Utils.dispatch_success(request, serializer.data)\n return Utils.dispatch_failure(request, 'VALIDATION_ERROR', serializer.errors)\n except Exception as e:\n return self.internal_server_error(request, e)\n", "source": "the_stack_v2_python_sparse", "source_path": "the_mechanic_backend/v0/stock/views.py", "source_repo": "muthukumar4999/the-mechanic-backend", "split": "test", "star_events_count": 0} {"blob_id": "ee7b20c899045c355f143aaf05dd736a276a9ae8", "bodies": ["self.session = Session()\nself.encode = 'utf-8'\nself.headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch, br', 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', 'Host': 'www.baidu.com', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}", "response = self.session.get(url, headers=self.headers, allow_redirects=True)\nprint(response.url)\nresponse.encoding = self.encode\nreturn response.content", "source = etree.HTML(html)\nparse_html = source.xpath(path)\nprint(parse_html)\nif parse_html:\n return parse_html\nreturn None"], "bodies_text": "<|body_start_0|>\n self.session = Session()\n self.encode = 'utf-8'\n self.headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch, br', 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', 'Host': 'www.baidu.com', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.session.get(url, headers=self.headers, allow_redirects=True)\n print(response.url)\n response.encoding = self.encode\n return response.content\n<|end_body_1|>\n\n<|body_start_2|>\n source = etree.HTML(html)\n parse_html = source.xpath(path)\n print(parse_html)\n if parse_html:\n return parse_html\n return None\n<|end_body_2|>\n", "class_docstring": "父类", "class_name": "Base", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Base:\n \"\"\"父类\"\"\"\n\n def __init__(self, encode=None):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def request(self, url):\n \"\"\"request active\"\"\"\n <|body_1|>\n\n def parse(self, html, path):\n \"\"\"parse page element\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.session = Session()\n self.encode = 'utf-8'\n self.headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch, br', 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', 'Host': 'www.baidu.com', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.session.get(url, headers=self.headers, allow_redirects=True)\n print(response.url)\n response.encoding = self.encode\n return response.content\n<|end_body_1|>\n\n<|body_start_2|>\n source = etree.HTML(html)\n parse_html = source.xpath(path)\n print(parse_html)\n if parse_html:\n return parse_html\n return None\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000398", "length_bytes": 2967, "license_type": "no_license", "methods": [{"docstring": "init", "name": "__init__", "signature": "def __init__(self, encode=None)"}, {"docstring": "request active", "name": "request", "signature": "def request(self, url)"}, {"docstring": "parse page element", "name": "parse", "signature": "def parse(self, html, path)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_028746", "prompt": "Implement the Python class `Base` described below.\n\nClass description:\n父类\n\nMethod signatures and docstrings:\n- def __init__(self, encode=None): init\n- def request(self, url): request active\n- def parse(self, html, path): parse page element", "prompted_full_text": "Implement the Python class `Base` described below.\n\nClass description:\n父类\n\nMethod signatures and docstrings:\n- def __init__(self, encode=None): init\n- def request(self, url): request active\n- def parse(self, html, path): parse page element\n\n<|skeleton|>\nclass Base:\n \"\"\"父类\"\"\"\n\n def __init__(self, encode=None):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def request(self, url):\n \"\"\"request active\"\"\"\n <|body_1|>\n\n def parse(self, html, path):\n \"\"\"parse page element\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.session = Session()\n self.encode = 'utf-8'\n self.headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch, br', 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', 'Host': 'www.baidu.com', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}\n<|end_body_0|>\n\n<|body_start_1|>\n response = self.session.get(url, headers=self.headers, allow_redirects=True)\n print(response.url)\n response.encoding = self.encode\n return response.content\n<|end_body_1|>\n\n<|body_start_2|>\n source = etree.HTML(html)\n parse_html = source.xpath(path)\n print(parse_html)\n if parse_html:\n return parse_html\n return None\n<|end_body_2|>\n", "revision_id": "b8dd4dd6dafaf9899e97bbb75a3ef80246ec427b", "skeleton": "<|skeleton|>\nclass Base:\n \"\"\"父类\"\"\"\n\n def __init__(self, encode=None):\n \"\"\"init\"\"\"\n <|body_0|>\n\n def request(self, url):\n \"\"\"request active\"\"\"\n <|body_1|>\n\n def parse(self, html, path):\n \"\"\"parse page element\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Base:\n \"\"\"父类\"\"\"\n\n def __init__(self, encode=None):\n \"\"\"init\"\"\"\n self.session = Session()\n self.encode = 'utf-8'\n self.headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8', 'Accept-Encoding': 'gzip, deflate, sdch, br', 'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6', 'Cache-Control': 'max-age=0', 'Connection': 'keep-alive', 'Host': 'www.baidu.com', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'}\n\n def request(self, url):\n \"\"\"request active\"\"\"\n response = self.session.get(url, headers=self.headers, allow_redirects=True)\n print(response.url)\n response.encoding = self.encode\n return response.content\n\n def parse(self, html, path):\n \"\"\"parse page element\"\"\"\n source = etree.HTML(html)\n parse_html = source.xpath(path)\n print(parse_html)\n if parse_html:\n return parse_html\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "fourth_week/seventh_day/encapsulation.py", "source_repo": "czkun1986/Let-s-go-python-", "split": "test", "star_events_count": 0} {"blob_id": "fb080344650c64402eba3ddca1d5f8b08c6d765a", "bodies": ["ssl._create_default_https_context = ssl.create_default_context\nsuper().__init__(domain, username, password)\nself._username = username\nself._password = password\nself.__testSolarLogin()", "try:\n testConnection = self.query('SELECT AccountID FROM Orion.Accounts')\nexcept (OSError, Exception):\n raise BadLoginInformation(self._username, self._password)"], "bodies_text": "<|body_start_0|>\n ssl._create_default_https_context = ssl.create_default_context\n super().__init__(domain, username, password)\n self._username = username\n self._password = password\n self.__testSolarLogin()\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n testConnection = self.query('SELECT AccountID FROM Orion.Accounts')\n except (OSError, Exception):\n raise BadLoginInformation(self._username, self._password)\n<|end_body_1|>\n", "class_docstring": "Class Name: Solarwinds Class Purpose: To manage Solarwinds objects", "class_name": "Solarwinds", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solarwinds:\n \"\"\"Class Name: Solarwinds Class Purpose: To manage Solarwinds objects\"\"\"\n\n def __init__(self, domain, username, password):\n \"\"\"Method Name: __init__ Method Purpose: To create a Solarwinds instance Parameters: - domain (string): The domain name for the solarwinds instance - username (string): The username logging in - password (string): The password for the user Returns: None\"\"\"\n <|body_0|>\n\n def __testSolarLogin(self):\n \"\"\"This is a simple function to query and make sure that the right credentials are in play; should not be used otherwise\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ssl._create_default_https_context = ssl.create_default_context\n super().__init__(domain, username, password)\n self._username = username\n self._password = password\n self.__testSolarLogin()\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n testConnection = self.query('SELECT AccountID FROM Orion.Accounts')\n except (OSError, Exception):\n raise BadLoginInformation(self._username, self._password)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000399", "length_bytes": 16799, "license_type": "no_license", "methods": [{"docstring": "Method Name: __init__ Method Purpose: To create a Solarwinds instance Parameters: - domain (string): The domain name for the solarwinds instance - username (string): The username logging in - password (string): The password for the user Returns: None", "name": "__init__", "signature": "def __init__(self, domain, username, password)"}, {"docstring": "This is a simple function to query and make sure that the right credentials are in play; should not be used otherwise", "name": "__testSolarLogin", "signature": "def __testSolarLogin(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_047447", "prompt": "Implement the Python class `Solarwinds` described below.\n\nClass description:\nClass Name: Solarwinds Class Purpose: To manage Solarwinds objects\n\nMethod signatures and docstrings:\n- def __init__(self, domain, username, password): Method Name: __init__ Method Purpose: To create a Solarwinds instance Parameters: - domain (string): The domain name for the solarwinds instance - username (string): The username logging in - password (string): The password for the user Returns: None\n- def __testSolarLogin(self): This is a simple function to query and make sure that the right credentials are in play; should not be used otherwise", "prompted_full_text": "Implement the Python class `Solarwinds` described below.\n\nClass description:\nClass Name: Solarwinds Class Purpose: To manage Solarwinds objects\n\nMethod signatures and docstrings:\n- def __init__(self, domain, username, password): Method Name: __init__ Method Purpose: To create a Solarwinds instance Parameters: - domain (string): The domain name for the solarwinds instance - username (string): The username logging in - password (string): The password for the user Returns: None\n- def __testSolarLogin(self): This is a simple function to query and make sure that the right credentials are in play; should not be used otherwise\n\n<|skeleton|>\nclass Solarwinds:\n \"\"\"Class Name: Solarwinds Class Purpose: To manage Solarwinds objects\"\"\"\n\n def __init__(self, domain, username, password):\n \"\"\"Method Name: __init__ Method Purpose: To create a Solarwinds instance Parameters: - domain (string): The domain name for the solarwinds instance - username (string): The username logging in - password (string): The password for the user Returns: None\"\"\"\n <|body_0|>\n\n def __testSolarLogin(self):\n \"\"\"This is a simple function to query and make sure that the right credentials are in play; should not be used otherwise\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ssl._create_default_https_context = ssl.create_default_context\n super().__init__(domain, username, password)\n self._username = username\n self._password = password\n self.__testSolarLogin()\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n testConnection = self.query('SELECT AccountID FROM Orion.Accounts')\n except (OSError, Exception):\n raise BadLoginInformation(self._username, self._password)\n<|end_body_1|>\n", "revision_id": "51402171b4f3f1823c91bd7cd830027c312b292f", "skeleton": "<|skeleton|>\nclass Solarwinds:\n \"\"\"Class Name: Solarwinds Class Purpose: To manage Solarwinds objects\"\"\"\n\n def __init__(self, domain, username, password):\n \"\"\"Method Name: __init__ Method Purpose: To create a Solarwinds instance Parameters: - domain (string): The domain name for the solarwinds instance - username (string): The username logging in - password (string): The password for the user Returns: None\"\"\"\n <|body_0|>\n\n def __testSolarLogin(self):\n \"\"\"This is a simple function to query and make sure that the right credentials are in play; should not be used otherwise\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solarwinds:\n \"\"\"Class Name: Solarwinds Class Purpose: To manage Solarwinds objects\"\"\"\n\n def __init__(self, domain, username, password):\n \"\"\"Method Name: __init__ Method Purpose: To create a Solarwinds instance Parameters: - domain (string): The domain name for the solarwinds instance - username (string): The username logging in - password (string): The password for the user Returns: None\"\"\"\n ssl._create_default_https_context = ssl.create_default_context\n super().__init__(domain, username, password)\n self._username = username\n self._password = password\n self.__testSolarLogin()\n\n def __testSolarLogin(self):\n \"\"\"This is a simple function to query and make sure that the right credentials are in play; should not be used otherwise\"\"\"\n try:\n testConnection = self.query('SELECT AccountID FROM Orion.Accounts')\n except (OSError, Exception):\n raise BadLoginInformation(self._username, self._password)\n", "source": "the_stack_v2_python_sparse", "source_path": "solarwinds.py", "source_repo": "dking74/Solarwinds-Availability-Dashboard", "split": "test", "star_events_count": 4} {"blob_id": "e7c2346eb99219742a7d46c817bba1194fdc6313", "bodies": ["dp = [[0] * len(l) for l in triangle]\ndp[0] = triangle[0]\nfor i in range(1, len(triangle)):\n for j in range(len(triangle[i])):\n l = dp[i - 1][j - 1] if j >= 1 else float('inf')\n m = dp[i - 1][j] if j < len(dp[i - 1]) else float('inf')\n ele = min(l, m)\n dp[i][j] = ele + triangle[i][j]\nreturn min(dp[-1])", "dp = [triangle[0][0]]\nlast = 0\nfor i in range(1, len(triangle)):\n min_val = float('inf')\n min_last = last\n for j in [last, last + 1]:\n if min_val > triangle[i][j]:\n min_val = triangle[i][j]\n min_last = j\n dp.append(min_val + dp[i - 1])\n last = min_last\nreturn dp[-1]"], "bodies_text": "<|body_start_0|>\n dp = [[0] * len(l) for l in triangle]\n dp[0] = triangle[0]\n for i in range(1, len(triangle)):\n for j in range(len(triangle[i])):\n l = dp[i - 1][j - 1] if j >= 1 else float('inf')\n m = dp[i - 1][j] if j < len(dp[i - 1]) else float('inf')\n ele = min(l, m)\n dp[i][j] = ele + triangle[i][j]\n return min(dp[-1])\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [triangle[0][0]]\n last = 0\n for i in range(1, len(triangle)):\n min_val = float('inf')\n min_last = last\n for j in [last, last + 1]:\n if min_val > triangle[i][j]:\n min_val = triangle[i][j]\n min_last = j\n dp.append(min_val + dp[i - 1])\n last = min_last\n return dp[-1]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n \"\"\"Dynamic Programming\"\"\"\n <|body_0|>\n\n def minimum_total(self, triangle):\n \"\"\"Linear Space\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [[0] * len(l) for l in triangle]\n dp[0] = triangle[0]\n for i in range(1, len(triangle)):\n for j in range(len(triangle[i])):\n l = dp[i - 1][j - 1] if j >= 1 else float('inf')\n m = dp[i - 1][j] if j < len(dp[i - 1]) else float('inf')\n ele = min(l, m)\n dp[i][j] = ele + triangle[i][j]\n return min(dp[-1])\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [triangle[0][0]]\n last = 0\n for i in range(1, len(triangle)):\n min_val = float('inf')\n min_last = last\n for j in [last, last + 1]:\n if min_val > triangle[i][j]:\n min_val = triangle[i][j]\n min_last = j\n dp.append(min_val + dp[i - 1])\n last = min_last\n return dp[-1]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000400", "length_bytes": 1014, "license_type": "no_license", "methods": [{"docstring": "Dynamic Programming", "name": "minimumTotal", "signature": "def minimumTotal(self, triangle: List[List[int]]) -> int"}, {"docstring": "Linear Space", "name": "minimum_total", "signature": "def minimum_total(self, triangle)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052350", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minimumTotal(self, triangle: List[List[int]]) -> int: Dynamic Programming\n- def minimum_total(self, triangle): Linear Space", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def minimumTotal(self, triangle: List[List[int]]) -> int: Dynamic Programming\n- def minimum_total(self, triangle): Linear Space\n\n<|skeleton|>\nclass Solution:\n\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n \"\"\"Dynamic Programming\"\"\"\n <|body_0|>\n\n def minimum_total(self, triangle):\n \"\"\"Linear Space\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n dp = [[0] * len(l) for l in triangle]\n dp[0] = triangle[0]\n for i in range(1, len(triangle)):\n for j in range(len(triangle[i])):\n l = dp[i - 1][j - 1] if j >= 1 else float('inf')\n m = dp[i - 1][j] if j < len(dp[i - 1]) else float('inf')\n ele = min(l, m)\n dp[i][j] = ele + triangle[i][j]\n return min(dp[-1])\n<|end_body_0|>\n\n<|body_start_1|>\n dp = [triangle[0][0]]\n last = 0\n for i in range(1, len(triangle)):\n min_val = float('inf')\n min_last = last\n for j in [last, last + 1]:\n if min_val > triangle[i][j]:\n min_val = triangle[i][j]\n min_last = j\n dp.append(min_val + dp[i - 1])\n last = min_last\n return dp[-1]\n<|end_body_1|>\n", "revision_id": "33252434f8d90b46fd2de07e257842331dcd81a8", "skeleton": "<|skeleton|>\nclass Solution:\n\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n \"\"\"Dynamic Programming\"\"\"\n <|body_0|>\n\n def minimum_total(self, triangle):\n \"\"\"Linear Space\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def minimumTotal(self, triangle: List[List[int]]) -> int:\n \"\"\"Dynamic Programming\"\"\"\n dp = [[0] * len(l) for l in triangle]\n dp[0] = triangle[0]\n for i in range(1, len(triangle)):\n for j in range(len(triangle[i])):\n l = dp[i - 1][j - 1] if j >= 1 else float('inf')\n m = dp[i - 1][j] if j < len(dp[i - 1]) else float('inf')\n ele = min(l, m)\n dp[i][j] = ele + triangle[i][j]\n return min(dp[-1])\n\n def minimum_total(self, triangle):\n \"\"\"Linear Space\"\"\"\n dp = [triangle[0][0]]\n last = 0\n for i in range(1, len(triangle)):\n min_val = float('inf')\n min_last = last\n for j in [last, last + 1]:\n if min_val > triangle[i][j]:\n min_val = triangle[i][j]\n min_last = j\n dp.append(min_val + dp[i - 1])\n last = min_last\n return dp[-1]\n", "source": "the_stack_v2_python_sparse", "source_path": "main/leetcode/120.py", "source_repo": "dawnonme/Eureka", "split": "test", "star_events_count": 0} {"blob_id": "3e5f1255c2276781a1a4f553bef9fa53919a388e", "bodies": ["s, i, r = xs\nif isinstance(parameters, Parameters):\n beta = parameters['beta'].value\n gamma = parameters['gamma'].value\n N = parameters['N'].value\nelif isinstance(parameters, tuple):\n beta, gamma, N = parameters\nelse:\n raise ValueError('Cannot recognize parameter input')\ndSdt = -beta * s * i / N\ndIdt = beta * s * i / N - gamma * i\ndRdt = gamma * i\nreturn (dSdt, dIdt, dRdt)", "parameters = Parameters()\nparameters.add('N', value=N, min=0, max=N, vary=False)\nparameters.add('S0', value=S0, min=0, max=S0_max, vary=not S0_fixed)\nparameters.add('I0', value=I0, min=0, max=I0_max, vary=not I0_fixed)\nparameters.add('R0', value=R0, min=0, max=R0_max, vary=not R0_fixed)\nparameters.add('beta', value=beta, min=0, max=beta_max, vary=not beta_fixed)\nparameters.add('gamma', value=gamma, min=0, max=gamma_max, vary=not gamma_fixed)\ninitial_conditions = ['S0', 'I0', 'R0']\nreturn (parameters, initial_conditions)"], "bodies_text": "<|body_start_0|>\n s, i, r = xs\n if isinstance(parameters, Parameters):\n beta = parameters['beta'].value\n gamma = parameters['gamma'].value\n N = parameters['N'].value\n elif isinstance(parameters, tuple):\n beta, gamma, N = parameters\n else:\n raise ValueError('Cannot recognize parameter input')\n dSdt = -beta * s * i / N\n dIdt = beta * s * i / N - gamma * i\n dRdt = gamma * i\n return (dSdt, dIdt, dRdt)\n<|end_body_0|>\n\n<|body_start_1|>\n parameters = Parameters()\n parameters.add('N', value=N, min=0, max=N, vary=False)\n parameters.add('S0', value=S0, min=0, max=S0_max, vary=not S0_fixed)\n parameters.add('I0', value=I0, min=0, max=I0_max, vary=not I0_fixed)\n parameters.add('R0', value=R0, min=0, max=R0_max, vary=not R0_fixed)\n parameters.add('beta', value=beta, min=0, max=beta_max, vary=not beta_fixed)\n parameters.add('gamma', value=gamma, min=0, max=gamma_max, vary=not gamma_fixed)\n initial_conditions = ['S0', 'I0', 'R0']\n return (parameters, initial_conditions)\n<|end_body_1|>\n", "class_docstring": "SIR Model", "class_name": "SIR", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SIR:\n \"\"\"SIR Model\"\"\"\n\n def calibrate(cls, xs: tuple, t: float, parameters: Union[Parameters, tuple]) -> tuple:\n \"\"\"SIR model derivatives at t. :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved :param t: time parameter, inactive for this model :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables\"\"\"\n <|body_0|>\n\n def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float=0.2, gamma: float=0.1, beta_max: float=10, gamma_max: float=1, S0_fixed: bool=True, S0_max: float=1000000.0, beta_fixed: bool=False, gamma_fixed: bool=False, R0_fixed: bool=True, R0_max: float=1000000.0, I0_fixed: bool=True, I0_max: float=1000000.0) -> tuple:\n \"\"\"Produce a set of parameters for the SIR model. :param S0: initial number of susceptible in the population :param I0: initial number of infected in the population, usually set to 1 :param R0: initial number of recovered/removed in the population, usually set to 0 :param N: size of the population :param beta: transmission rate parameter :param gamma: recovery rate parameter :param beta_max: maximum value to consider for beta during parameter fitting :param gamma_max: maximum value of gamma to consider during parameter fitting :param S0_fixed: whether to keep S0 fixed during fitting :param S0_max: maximum value of S0 to consider during parameter fitting :param R0_fixed: whether to keep R0 fixed\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s, i, r = xs\n if isinstance(parameters, Parameters):\n beta = parameters['beta'].value\n gamma = parameters['gamma'].value\n N = parameters['N'].value\n elif isinstance(parameters, tuple):\n beta, gamma, N = parameters\n else:\n raise ValueError('Cannot recognize parameter input')\n dSdt = -beta * s * i / N\n dIdt = beta * s * i / N - gamma * i\n dRdt = gamma * i\n return (dSdt, dIdt, dRdt)\n<|end_body_0|>\n\n<|body_start_1|>\n parameters = Parameters()\n parameters.add('N', value=N, min=0, max=N, vary=False)\n parameters.add('S0', value=S0, min=0, max=S0_max, vary=not S0_fixed)\n parameters.add('I0', value=I0, min=0, max=I0_max, vary=not I0_fixed)\n parameters.add('R0', value=R0, min=0, max=R0_max, vary=not R0_fixed)\n parameters.add('beta', value=beta, min=0, max=beta_max, vary=not beta_fixed)\n parameters.add('gamma', value=gamma, min=0, max=gamma_max, vary=not gamma_fixed)\n initial_conditions = ['S0', 'I0', 'R0']\n return (parameters, initial_conditions)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000401", "length_bytes": 29649, "license_type": "permissive", "methods": [{"docstring": "SIR model derivatives at t. :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved :param t: time parameter, inactive for this model :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables", "name": "calibrate", "signature": "def calibrate(cls, xs: tuple, t: float, parameters: Union[Parameters, tuple]) -> tuple"}, {"docstring": "Produce a set of parameters for the SIR model. :param S0: initial number of susceptible in the population :param I0: initial number of infected in the population, usually set to 1 :param R0: initial number of recovered/removed in the population, usually set to 0 :param N: size of the population :param beta: transmission rate parameter :param gamma: recovery rate parameter :param beta_max: maximum value to consider for beta during parameter fitting :param gamma_max: maximum value of gamma to consider during parameter fitting :param S0_fixed: whether to keep S0 fixed during fitting :param S0_max: maximum value of S0 to consider during parameter fitting :param R0_fixed: whether to keep R0 fixed", "name": "get_parameters", "signature": "def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float=0.2, gamma: float=0.1, beta_max: float=10, gamma_max: float=1, S0_fixed: bool=True, S0_max: float=1000000.0, beta_fixed: bool=False, gamma_fixed: bool=False, R0_fixed: bool=True, R0_max: float=1000000.0, I0_fixed: bool=True, I0_max: float=1000000.0) -> tuple"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_001391", "prompt": "Implement the Python class `SIR` described below.\n\nClass description:\nSIR Model\n\nMethod signatures and docstrings:\n- def calibrate(cls, xs: tuple, t: float, parameters: Union[Parameters, tuple]) -> tuple: SIR model derivatives at t. :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved :param t: time parameter, inactive for this model :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables\n- def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float=0.2, gamma: float=0.1, beta_max: float=10, gamma_max: float=1, S0_fixed: bool=True, S0_max: float=1000000.0, beta_fixed: bool=False, gamma_fixed: bool=False, R0_fixed: bool=True, R0_max: float=1000000.0, I0_fixed: bool=True, I0_max: float=1000000.0) -> tuple: Produce a set of parameters for the SIR model. :param S0: initial number of susceptible in the population :param I0: initial number of infected in the population, usually set to 1 :param R0: initial number of recovered/removed in the population, usually set to 0 :param N: size of the population :param beta: transmission rate parameter :param gamma: recovery rate parameter :param beta_max: maximum value to consider for beta during parameter fitting :param gamma_max: maximum value of gamma to consider during parameter fitting :param S0_fixed: whether to keep S0 fixed during fitting :param S0_max: maximum value of S0 to consider during parameter fitting :param R0_fixed: whether to keep R0 fixed", "prompted_full_text": "Implement the Python class `SIR` described below.\n\nClass description:\nSIR Model\n\nMethod signatures and docstrings:\n- def calibrate(cls, xs: tuple, t: float, parameters: Union[Parameters, tuple]) -> tuple: SIR model derivatives at t. :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved :param t: time parameter, inactive for this model :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables\n- def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float=0.2, gamma: float=0.1, beta_max: float=10, gamma_max: float=1, S0_fixed: bool=True, S0_max: float=1000000.0, beta_fixed: bool=False, gamma_fixed: bool=False, R0_fixed: bool=True, R0_max: float=1000000.0, I0_fixed: bool=True, I0_max: float=1000000.0) -> tuple: Produce a set of parameters for the SIR model. :param S0: initial number of susceptible in the population :param I0: initial number of infected in the population, usually set to 1 :param R0: initial number of recovered/removed in the population, usually set to 0 :param N: size of the population :param beta: transmission rate parameter :param gamma: recovery rate parameter :param beta_max: maximum value to consider for beta during parameter fitting :param gamma_max: maximum value of gamma to consider during parameter fitting :param S0_fixed: whether to keep S0 fixed during fitting :param S0_max: maximum value of S0 to consider during parameter fitting :param R0_fixed: whether to keep R0 fixed\n\n<|skeleton|>\nclass SIR:\n \"\"\"SIR Model\"\"\"\n\n def calibrate(cls, xs: tuple, t: float, parameters: Union[Parameters, tuple]) -> tuple:\n \"\"\"SIR model derivatives at t. :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved :param t: time parameter, inactive for this model :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables\"\"\"\n <|body_0|>\n\n def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float=0.2, gamma: float=0.1, beta_max: float=10, gamma_max: float=1, S0_fixed: bool=True, S0_max: float=1000000.0, beta_fixed: bool=False, gamma_fixed: bool=False, R0_fixed: bool=True, R0_max: float=1000000.0, I0_fixed: bool=True, I0_max: float=1000000.0) -> tuple:\n \"\"\"Produce a set of parameters for the SIR model. :param S0: initial number of susceptible in the population :param I0: initial number of infected in the population, usually set to 1 :param R0: initial number of recovered/removed in the population, usually set to 0 :param N: size of the population :param beta: transmission rate parameter :param gamma: recovery rate parameter :param beta_max: maximum value to consider for beta during parameter fitting :param gamma_max: maximum value of gamma to consider during parameter fitting :param S0_fixed: whether to keep S0 fixed during fitting :param S0_max: maximum value of S0 to consider during parameter fitting :param R0_fixed: whether to keep R0 fixed\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n s, i, r = xs\n if isinstance(parameters, Parameters):\n beta = parameters['beta'].value\n gamma = parameters['gamma'].value\n N = parameters['N'].value\n elif isinstance(parameters, tuple):\n beta, gamma, N = parameters\n else:\n raise ValueError('Cannot recognize parameter input')\n dSdt = -beta * s * i / N\n dIdt = beta * s * i / N - gamma * i\n dRdt = gamma * i\n return (dSdt, dIdt, dRdt)\n<|end_body_0|>\n\n<|body_start_1|>\n parameters = Parameters()\n parameters.add('N', value=N, min=0, max=N, vary=False)\n parameters.add('S0', value=S0, min=0, max=S0_max, vary=not S0_fixed)\n parameters.add('I0', value=I0, min=0, max=I0_max, vary=not I0_fixed)\n parameters.add('R0', value=R0, min=0, max=R0_max, vary=not R0_fixed)\n parameters.add('beta', value=beta, min=0, max=beta_max, vary=not beta_fixed)\n parameters.add('gamma', value=gamma, min=0, max=gamma_max, vary=not gamma_fixed)\n initial_conditions = ['S0', 'I0', 'R0']\n return (parameters, initial_conditions)\n<|end_body_1|>\n", "revision_id": "4cf8ec75c4d85b16ec08371c46cc1a9ede9d72a2", "skeleton": "<|skeleton|>\nclass SIR:\n \"\"\"SIR Model\"\"\"\n\n def calibrate(cls, xs: tuple, t: float, parameters: Union[Parameters, tuple]) -> tuple:\n \"\"\"SIR model derivatives at t. :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved :param t: time parameter, inactive for this model :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables\"\"\"\n <|body_0|>\n\n def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float=0.2, gamma: float=0.1, beta_max: float=10, gamma_max: float=1, S0_fixed: bool=True, S0_max: float=1000000.0, beta_fixed: bool=False, gamma_fixed: bool=False, R0_fixed: bool=True, R0_max: float=1000000.0, I0_fixed: bool=True, I0_max: float=1000000.0) -> tuple:\n \"\"\"Produce a set of parameters for the SIR model. :param S0: initial number of susceptible in the population :param I0: initial number of infected in the population, usually set to 1 :param R0: initial number of recovered/removed in the population, usually set to 0 :param N: size of the population :param beta: transmission rate parameter :param gamma: recovery rate parameter :param beta_max: maximum value to consider for beta during parameter fitting :param gamma_max: maximum value of gamma to consider during parameter fitting :param S0_fixed: whether to keep S0 fixed during fitting :param S0_max: maximum value of S0 to consider during parameter fitting :param R0_fixed: whether to keep R0 fixed\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SIR:\n \"\"\"SIR Model\"\"\"\n\n def calibrate(cls, xs: tuple, t: float, parameters: Union[Parameters, tuple]) -> tuple:\n \"\"\"SIR model derivatives at t. :param xs: variables that we are solving for, i.e. [S]usceptible, [I]nfected, [R]emoved :param t: time parameter, inactive for this model :param parameters: parameters of the model (not including initial conditions), i.e. beta, gamma, N :return: tuple, the derivatives dSdt, dIdt, dRdt of each of the S, I, R variables\"\"\"\n s, i, r = xs\n if isinstance(parameters, Parameters):\n beta = parameters['beta'].value\n gamma = parameters['gamma'].value\n N = parameters['N'].value\n elif isinstance(parameters, tuple):\n beta, gamma, N = parameters\n else:\n raise ValueError('Cannot recognize parameter input')\n dSdt = -beta * s * i / N\n dIdt = beta * s * i / N - gamma * i\n dRdt = gamma * i\n return (dSdt, dIdt, dRdt)\n\n def get_parameters(cls, S0: float, I0: float, R0: float, N: float, beta: float=0.2, gamma: float=0.1, beta_max: float=10, gamma_max: float=1, S0_fixed: bool=True, S0_max: float=1000000.0, beta_fixed: bool=False, gamma_fixed: bool=False, R0_fixed: bool=True, R0_max: float=1000000.0, I0_fixed: bool=True, I0_max: float=1000000.0) -> tuple:\n \"\"\"Produce a set of parameters for the SIR model. :param S0: initial number of susceptible in the population :param I0: initial number of infected in the population, usually set to 1 :param R0: initial number of recovered/removed in the population, usually set to 0 :param N: size of the population :param beta: transmission rate parameter :param gamma: recovery rate parameter :param beta_max: maximum value to consider for beta during parameter fitting :param gamma_max: maximum value of gamma to consider during parameter fitting :param S0_fixed: whether to keep S0 fixed during fitting :param S0_max: maximum value of S0 to consider during parameter fitting :param R0_fixed: whether to keep R0 fixed\"\"\"\n parameters = Parameters()\n parameters.add('N', value=N, min=0, max=N, vary=False)\n parameters.add('S0', value=S0, min=0, max=S0_max, vary=not S0_fixed)\n parameters.add('I0', value=I0, min=0, max=I0_max, vary=not I0_fixed)\n parameters.add('R0', value=R0, min=0, max=R0_max, vary=not R0_fixed)\n parameters.add('beta', value=beta, min=0, max=beta_max, vary=not beta_fixed)\n parameters.add('gamma', value=gamma, min=0, max=gamma_max, vary=not gamma_fixed)\n initial_conditions = ['S0', 'I0', 'R0']\n return (parameters, initial_conditions)\n", "source": "the_stack_v2_python_sparse", "source_path": "gs_quant/models/epidemiology.py", "source_repo": "goldmansachs/gs-quant", "split": "test", "star_events_count": 2088} {"blob_id": "7fd543ee8b70b1669c41e61ac0e24e5595bb0b9c", "bodies": ["self.w = w\nself.n = len(w)\nself.s = sum(self.w)\nfor i in range(1, self.n):\n w[i] += w[i - 1]", "seed = random.randint(1, self.s)\nl, r = (0, self.n - 1)\nwhile l < r:\n mid = (l + r) // 2\n if seed <= self.w[mid]:\n r = mid\n else:\n l = mid + 1\nreturn l"], "bodies_text": "<|body_start_0|>\n self.w = w\n self.n = len(w)\n self.s = sum(self.w)\n for i in range(1, self.n):\n w[i] += w[i - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n seed = random.randint(1, self.s)\n l, r = (0, self.n - 1)\n while l < r:\n mid = (l + r) // 2\n if seed <= self.w[mid]:\n r = mid\n else:\n l = mid + 1\n return l\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n <|body_0|>\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.w = w\n self.n = len(w)\n self.s = sum(self.w)\n for i in range(1, self.n):\n w[i] += w[i - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n seed = random.randint(1, self.s)\n l, r = (0, self.n - 1)\n while l < r:\n mid = (l + r) // 2\n if seed <= self.w[mid]:\n r = mid\n else:\n l = mid + 1\n return l\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000402", "length_bytes": 570, "license_type": "no_license", "methods": [{"docstring": ":type w: List[int]", "name": "__init__", "signature": "def __init__(self, w)"}, {"docstring": ":rtype: int", "name": "pickIndex", "signature": "def pickIndex(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034438", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, w): :type w: List[int]\n- def pickIndex(self): :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def __init__(self, w): :type w: List[int]\n- def pickIndex(self): :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n <|body_0|>\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.w = w\n self.n = len(w)\n self.s = sum(self.w)\n for i in range(1, self.n):\n w[i] += w[i - 1]\n<|end_body_0|>\n\n<|body_start_1|>\n seed = random.randint(1, self.s)\n l, r = (0, self.n - 1)\n while l < r:\n mid = (l + r) // 2\n if seed <= self.w[mid]:\n r = mid\n else:\n l = mid + 1\n return l\n<|end_body_1|>\n", "revision_id": "eb3fc22450b362703c3322d9e975d191eb324ffc", "skeleton": "<|skeleton|>\nclass Solution:\n\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n <|body_0|>\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def __init__(self, w):\n \"\"\":type w: List[int]\"\"\"\n self.w = w\n self.n = len(w)\n self.s = sum(self.w)\n for i in range(1, self.n):\n w[i] += w[i - 1]\n\n def pickIndex(self):\n \"\"\":rtype: int\"\"\"\n seed = random.randint(1, self.s)\n l, r = (0, self.n - 1)\n while l < r:\n mid = (l + r) // 2\n if seed <= self.w[mid]:\n r = mid\n else:\n l = mid + 1\n return l\n", "source": "the_stack_v2_python_sparse", "source_path": "2-27/528-Random-Pick-with-Weight.py", "source_repo": "whalejasmine/leetcode_python_summary", "split": "test", "star_events_count": 0} {"blob_id": "af4665308c3c58c71f236ca95ab29478be425a56", "bodies": ["n = 0\nfor k in self:\n n += len(self[k])\nreturn n", "valid = []\nfor k in self:\n if len(self[k]) > 0:\n valid.append(k)\nreturn valid"], "bodies_text": "<|body_start_0|>\n n = 0\n for k in self:\n n += len(self[k])\n return n\n<|end_body_0|>\n\n<|body_start_1|>\n valid = []\n for k in self:\n if len(self[k]) > 0:\n valid.append(k)\n return valid\n<|end_body_1|>\n", "class_docstring": "", "class_name": "FilterDict", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FilterDict:\n\n def nfiles(self):\n \"\"\"Count number of exposures\"\"\"\n <|body_0|>\n\n def valid_filters(self):\n \"\"\"Return a list of filters with N >= 1 files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = 0\n for k in self:\n n += len(self[k])\n return n\n<|end_body_0|>\n\n<|body_start_1|>\n valid = []\n for k in self:\n if len(self[k]) > 0:\n valid.append(k)\n return valid\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000403", "length_bytes": 46339, "license_type": "permissive", "methods": [{"docstring": "Count number of exposures", "name": "nfiles", "signature": "def nfiles(self)"}, {"docstring": "Return a list of filters with N >= 1 files", "name": "valid_filters", "signature": "def valid_filters(self)"}], "n_methods": 2, "prompt": "Implement the Python class `FilterDict` described below.\n\nClass description:\nImplement the FilterDict class.\n\nMethod signatures and docstrings:\n- def nfiles(self): Count number of exposures\n- def valid_filters(self): Return a list of filters with N >= 1 files", "prompted_full_text": "Implement the Python class `FilterDict` described below.\n\nClass description:\nImplement the FilterDict class.\n\nMethod signatures and docstrings:\n- def nfiles(self): Count number of exposures\n- def valid_filters(self): Return a list of filters with N >= 1 files\n\n<|skeleton|>\nclass FilterDict:\n\n def nfiles(self):\n \"\"\"Count number of exposures\"\"\"\n <|body_0|>\n\n def valid_filters(self):\n \"\"\"Return a list of filters with N >= 1 files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n n = 0\n for k in self:\n n += len(self[k])\n return n\n<|end_body_0|>\n\n<|body_start_1|>\n valid = []\n for k in self:\n if len(self[k]) > 0:\n valid.append(k)\n return valid\n<|end_body_1|>\n", "revision_id": "d1df9b03f09a918f1941f1459a41178eb3411341", "skeleton": "<|skeleton|>\nclass FilterDict:\n\n def nfiles(self):\n \"\"\"Count number of exposures\"\"\"\n <|body_0|>\n\n def valid_filters(self):\n \"\"\"Return a list of filters with N >= 1 files\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FilterDict:\n def nfiles(self):\n \"\"\"Count number of exposures\"\"\"\n n = 0\n for k in self:\n n += len(self[k])\n return n\n\n def valid_filters(self):\n \"\"\"Return a list of filters with N >= 1 files\"\"\"\n valid = []\n for k in self:\n if len(self[k]) > 0:\n valid.append(k)\n return valid\n", "source": "the_stack_v2_python_sparse", "source_path": "grizli/aws/aws_drizzler.py", "source_repo": "gbrammer/grizli", "split": "test", "star_events_count": 64} {"blob_id": "5122d304eda81ec5c68eea1a01ffda2baf609f2f", "bodies": ["pl = PageLogin(self.driver)\npl.quick_login()\nree = ReceiveEmail(self.driver)\nree.goto_inbox()\nself.driver.switch_to.frame('mainFrame')\nree.single_check(0)\nree.mark_as_unstar()\nself.driver.switch_to.default_content()\nassert ree.star_mail_statistics() == '24', '标记星标邮件失败'", "pl = PageLogin(self.driver)\npl.quick_login()\nree = ReceiveEmail(self.driver)\nree.goto_inbox()\nself.driver.switch_to.frame('mainFrame')\nree.check_multi(0)\nree.mark_as_unstar()\nself.driver.switch_to.default_content()\nassert ree.star_mail_statistics() <= '21', '标记星标邮件失败'", "pl = PageLogin(self.driver)\npl.quick_login()\nree = ReceiveEmail(self.driver)\nree.goto_inbox()\nself.driver.switch_to.frame('mainFrame')\nree.check_by_sender('罗江华', 0)\nree.mark_as_unstar()\nself.driver.switch_to.default_content()\nassert ree.star_mail_statistics() == '0', '标记星标邮件失败'", "pl = PageLogin(self.driver)\npl.quick_login()\nree = ReceiveEmail(self.driver)\nree.goto_inbox()\nself.driver.switch_to.frame('mainFrame')\nree.all_check()\nree.mark_as_unstar()\nself.driver.switch_to.default_content()\nassert ree.star_mail_statistics() == '0', '标记星标邮件失败'"], "bodies_text": "<|body_start_0|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.single_check(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '24', '标记星标邮件失败'\n<|end_body_0|>\n\n<|body_start_1|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_multi(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() <= '21', '标记星标邮件失败'\n<|end_body_1|>\n\n<|body_start_2|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_by_sender('罗江华', 0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n<|end_body_2|>\n\n<|body_start_3|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.all_check()\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n<|end_body_3|>\n", "class_docstring": "测试标记功能", "class_name": "TestMark", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TestMark:\n \"\"\"测试标记功能\"\"\"\n\n def test1_mark_as_unstar(self):\n \"\"\"测试随机单个邮件标记为星标\"\"\"\n <|body_0|>\n\n def test2_mark_as_unstar(self):\n \"\"\"测试随机批量邮件标记为星标\"\"\"\n <|body_1|>\n\n def test3_mark_as_unstar(self):\n \"\"\"测试按发件人姓名将邮件标记为星标\"\"\"\n <|body_2|>\n\n def test4_mark_as_unstar(self):\n \"\"\"测试页面全部邮件标记为星标\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.single_check(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '24', '标记星标邮件失败'\n<|end_body_0|>\n\n<|body_start_1|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_multi(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() <= '21', '标记星标邮件失败'\n<|end_body_1|>\n\n<|body_start_2|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_by_sender('罗江华', 0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n<|end_body_2|>\n\n<|body_start_3|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.all_check()\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000404", "length_bytes": 2811, "license_type": "no_license", "methods": [{"docstring": "测试随机单个邮件标记为星标", "name": "test1_mark_as_unstar", "signature": "def test1_mark_as_unstar(self)"}, {"docstring": "测试随机批量邮件标记为星标", "name": "test2_mark_as_unstar", "signature": "def test2_mark_as_unstar(self)"}, {"docstring": "测试按发件人姓名将邮件标记为星标", "name": "test3_mark_as_unstar", "signature": "def test3_mark_as_unstar(self)"}, {"docstring": "测试页面全部邮件标记为星标", "name": "test4_mark_as_unstar", "signature": "def test4_mark_as_unstar(self)"}], "n_methods": 4, "prompt": "Implement the Python class `TestMark` described below.\n\nClass description:\n测试标记功能\n\nMethod signatures and docstrings:\n- def test1_mark_as_unstar(self): 测试随机单个邮件标记为星标\n- def test2_mark_as_unstar(self): 测试随机批量邮件标记为星标\n- def test3_mark_as_unstar(self): 测试按发件人姓名将邮件标记为星标\n- def test4_mark_as_unstar(self): 测试页面全部邮件标记为星标", "prompted_full_text": "Implement the Python class `TestMark` described below.\n\nClass description:\n测试标记功能\n\nMethod signatures and docstrings:\n- def test1_mark_as_unstar(self): 测试随机单个邮件标记为星标\n- def test2_mark_as_unstar(self): 测试随机批量邮件标记为星标\n- def test3_mark_as_unstar(self): 测试按发件人姓名将邮件标记为星标\n- def test4_mark_as_unstar(self): 测试页面全部邮件标记为星标\n\n<|skeleton|>\nclass TestMark:\n \"\"\"测试标记功能\"\"\"\n\n def test1_mark_as_unstar(self):\n \"\"\"测试随机单个邮件标记为星标\"\"\"\n <|body_0|>\n\n def test2_mark_as_unstar(self):\n \"\"\"测试随机批量邮件标记为星标\"\"\"\n <|body_1|>\n\n def test3_mark_as_unstar(self):\n \"\"\"测试按发件人姓名将邮件标记为星标\"\"\"\n <|body_2|>\n\n def test4_mark_as_unstar(self):\n \"\"\"测试页面全部邮件标记为星标\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.single_check(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '24', '标记星标邮件失败'\n<|end_body_0|>\n\n<|body_start_1|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_multi(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() <= '21', '标记星标邮件失败'\n<|end_body_1|>\n\n<|body_start_2|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_by_sender('罗江华', 0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n<|end_body_2|>\n\n<|body_start_3|>\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.all_check()\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n<|end_body_3|>\n", "revision_id": "d6fb7c64903dfbf89f9b10f4bc3beb72e7c251f5", "skeleton": "<|skeleton|>\nclass TestMark:\n \"\"\"测试标记功能\"\"\"\n\n def test1_mark_as_unstar(self):\n \"\"\"测试随机单个邮件标记为星标\"\"\"\n <|body_0|>\n\n def test2_mark_as_unstar(self):\n \"\"\"测试随机批量邮件标记为星标\"\"\"\n <|body_1|>\n\n def test3_mark_as_unstar(self):\n \"\"\"测试按发件人姓名将邮件标记为星标\"\"\"\n <|body_2|>\n\n def test4_mark_as_unstar(self):\n \"\"\"测试页面全部邮件标记为星标\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TestMark:\n \"\"\"测试标记功能\"\"\"\n\n def test1_mark_as_unstar(self):\n \"\"\"测试随机单个邮件标记为星标\"\"\"\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.single_check(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '24', '标记星标邮件失败'\n\n def test2_mark_as_unstar(self):\n \"\"\"测试随机批量邮件标记为星标\"\"\"\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_multi(0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() <= '21', '标记星标邮件失败'\n\n def test3_mark_as_unstar(self):\n \"\"\"测试按发件人姓名将邮件标记为星标\"\"\"\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.check_by_sender('罗江华', 0)\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n\n def test4_mark_as_unstar(self):\n \"\"\"测试页面全部邮件标记为星标\"\"\"\n pl = PageLogin(self.driver)\n pl.quick_login()\n ree = ReceiveEmail(self.driver)\n ree.goto_inbox()\n self.driver.switch_to.frame('mainFrame')\n ree.all_check()\n ree.mark_as_unstar()\n self.driver.switch_to.default_content()\n assert ree.star_mail_statistics() == '0', '标记星标邮件失败'\n", "source": "the_stack_v2_python_sparse", "source_path": "QQ_mail_auto_test/mail_auto_test/test_case/testK_mark_as_unstar.py", "source_repo": "jianghualuo/python_selenium", "split": "test", "star_events_count": 0} {"blob_id": "1486af3848f9ce74c1c9ad17c7fc0fe6a6c3b2bb", "bodies": ["if self.action in ['login', 'signup', 'verify']:\n permissions = [AllowAny]\nelif self.action in ['profile', 'retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\nelse:\n permissions = [IsAuthenticated]\nreturn [p() for p in permissions]", "serializer = UserLoginSerializer(data=request.data)\nserializer.is_valid(raise_exception=True)\nuser, token = serializer.save()\ndata = {'user': UserModelSerializer(user).data, 'token': str(token.access_token)}\nreturn Response(data, status=status.HTTP_200_OK)", "serializer = UserSignUpSerializer(data=request.data)\nserializer.is_valid(raise_exception=True)\nuser = serializer.save()\ndata = UserModelSerializer(user).data\nreturn Response(data, status=status.HTTP_201_CREATED)", "serializer = AccountVerificationSerializer(data=request.data)\nserializer.is_valid(raise_exception=True)\nserializer.save()\ndata = {'message': '\\\\,,/(^_^)\\\\,,/ Congrats! u r verified'}\nreturn Response(data, status=status.HTTP_200_OK)", "user = self.get_object()\nprofile = user.profile\npartial = request.method == 'PATCH'\nserializer = ProfileModelSerializer(profile, data=request.data, partial=partial)\nserializer.is_valid(raise_exception=True)\nserializer.save()\ndata = UserModelSerializer(user).data\nprint(data)\nreturn Response(data)"], "bodies_text": "<|body_start_0|>\n if self.action in ['login', 'signup', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['profile', 'retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]\n<|end_body_0|>\n\n<|body_start_1|>\n serializer = UserLoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user, token = serializer.save()\n data = {'user': UserModelSerializer(user).data, 'token': str(token.access_token)}\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n serializer = UserSignUpSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n data = UserModelSerializer(user).data\n return Response(data, status=status.HTTP_201_CREATED)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = AccountVerificationSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = {'message': '\\\\,,/(^_^)\\\\,,/ Congrats! u r verified'}\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.get_object()\n profile = user.profile\n partial = request.method == 'PATCH'\n serializer = ProfileModelSerializer(profile, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = UserModelSerializer(user).data\n print(data)\n return Response(data)\n<|end_body_4|>\n", "class_docstring": "User view set. Handle sign up, login and account verification.", "class_name": "UserViewSet", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserViewSet:\n \"\"\"User view set. Handle sign up, login and account verification.\"\"\"\n\n def get_permissions(self):\n \"\"\"Assign permissions based on action. If the action is retrieve, custom permission is added so that only the same User can be edited and viewed\"\"\"\n <|body_0|>\n\n def login(self, request):\n \"\"\"User sign in.\"\"\"\n <|body_1|>\n\n def signup(self, request):\n \"\"\"User sign up.\"\"\"\n <|body_2|>\n\n def verify(self, request):\n \"\"\"Account verification.\"\"\"\n <|body_3|>\n\n def profile(self, request, *args, **kwargs):\n \"\"\"Update profile data.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.action in ['login', 'signup', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['profile', 'retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]\n<|end_body_0|>\n\n<|body_start_1|>\n serializer = UserLoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user, token = serializer.save()\n data = {'user': UserModelSerializer(user).data, 'token': str(token.access_token)}\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n serializer = UserSignUpSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n data = UserModelSerializer(user).data\n return Response(data, status=status.HTTP_201_CREATED)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = AccountVerificationSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = {'message': '\\\\,,/(^_^)\\\\,,/ Congrats! u r verified'}\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.get_object()\n profile = user.profile\n partial = request.method == 'PATCH'\n serializer = ProfileModelSerializer(profile, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = UserModelSerializer(user).data\n print(data)\n return Response(data)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000405", "length_bytes": 3563, "license_type": "permissive", "methods": [{"docstring": "Assign permissions based on action. If the action is retrieve, custom permission is added so that only the same User can be edited and viewed", "name": "get_permissions", "signature": "def get_permissions(self)"}, {"docstring": "User sign in.", "name": "login", "signature": "def login(self, request)"}, {"docstring": "User sign up.", "name": "signup", "signature": "def signup(self, request)"}, {"docstring": "Account verification.", "name": "verify", "signature": "def verify(self, request)"}, {"docstring": "Update profile data.", "name": "profile", "signature": "def profile(self, request, *args, **kwargs)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_032325", "prompt": "Implement the Python class `UserViewSet` described below.\n\nClass description:\nUser view set. Handle sign up, login and account verification.\n\nMethod signatures and docstrings:\n- def get_permissions(self): Assign permissions based on action. If the action is retrieve, custom permission is added so that only the same User can be edited and viewed\n- def login(self, request): User sign in.\n- def signup(self, request): User sign up.\n- def verify(self, request): Account verification.\n- def profile(self, request, *args, **kwargs): Update profile data.", "prompted_full_text": "Implement the Python class `UserViewSet` described below.\n\nClass description:\nUser view set. Handle sign up, login and account verification.\n\nMethod signatures and docstrings:\n- def get_permissions(self): Assign permissions based on action. If the action is retrieve, custom permission is added so that only the same User can be edited and viewed\n- def login(self, request): User sign in.\n- def signup(self, request): User sign up.\n- def verify(self, request): Account verification.\n- def profile(self, request, *args, **kwargs): Update profile data.\n\n<|skeleton|>\nclass UserViewSet:\n \"\"\"User view set. Handle sign up, login and account verification.\"\"\"\n\n def get_permissions(self):\n \"\"\"Assign permissions based on action. If the action is retrieve, custom permission is added so that only the same User can be edited and viewed\"\"\"\n <|body_0|>\n\n def login(self, request):\n \"\"\"User sign in.\"\"\"\n <|body_1|>\n\n def signup(self, request):\n \"\"\"User sign up.\"\"\"\n <|body_2|>\n\n def verify(self, request):\n \"\"\"Account verification.\"\"\"\n <|body_3|>\n\n def profile(self, request, *args, **kwargs):\n \"\"\"Update profile data.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.action in ['login', 'signup', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['profile', 'retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]\n<|end_body_0|>\n\n<|body_start_1|>\n serializer = UserLoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user, token = serializer.save()\n data = {'user': UserModelSerializer(user).data, 'token': str(token.access_token)}\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_1|>\n\n<|body_start_2|>\n serializer = UserSignUpSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n data = UserModelSerializer(user).data\n return Response(data, status=status.HTTP_201_CREATED)\n<|end_body_2|>\n\n<|body_start_3|>\n serializer = AccountVerificationSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = {'message': '\\\\,,/(^_^)\\\\,,/ Congrats! u r verified'}\n return Response(data, status=status.HTTP_200_OK)\n<|end_body_3|>\n\n<|body_start_4|>\n user = self.get_object()\n profile = user.profile\n partial = request.method == 'PATCH'\n serializer = ProfileModelSerializer(profile, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = UserModelSerializer(user).data\n print(data)\n return Response(data)\n<|end_body_4|>\n", "revision_id": "9693e95ca723f1846666838a24c60d0715224e36", "skeleton": "<|skeleton|>\nclass UserViewSet:\n \"\"\"User view set. Handle sign up, login and account verification.\"\"\"\n\n def get_permissions(self):\n \"\"\"Assign permissions based on action. If the action is retrieve, custom permission is added so that only the same User can be edited and viewed\"\"\"\n <|body_0|>\n\n def login(self, request):\n \"\"\"User sign in.\"\"\"\n <|body_1|>\n\n def signup(self, request):\n \"\"\"User sign up.\"\"\"\n <|body_2|>\n\n def verify(self, request):\n \"\"\"Account verification.\"\"\"\n <|body_3|>\n\n def profile(self, request, *args, **kwargs):\n \"\"\"Update profile data.\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UserViewSet:\n \"\"\"User view set. Handle sign up, login and account verification.\"\"\"\n\n def get_permissions(self):\n \"\"\"Assign permissions based on action. If the action is retrieve, custom permission is added so that only the same User can be edited and viewed\"\"\"\n if self.action in ['login', 'signup', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['profile', 'retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]\n\n def login(self, request):\n \"\"\"User sign in.\"\"\"\n serializer = UserLoginSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user, token = serializer.save()\n data = {'user': UserModelSerializer(user).data, 'token': str(token.access_token)}\n return Response(data, status=status.HTTP_200_OK)\n\n def signup(self, request):\n \"\"\"User sign up.\"\"\"\n serializer = UserSignUpSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n user = serializer.save()\n data = UserModelSerializer(user).data\n return Response(data, status=status.HTTP_201_CREATED)\n\n def verify(self, request):\n \"\"\"Account verification.\"\"\"\n serializer = AccountVerificationSerializer(data=request.data)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = {'message': '\\\\,,/(^_^)\\\\,,/ Congrats! u r verified'}\n return Response(data, status=status.HTTP_200_OK)\n\n def profile(self, request, *args, **kwargs):\n \"\"\"Update profile data.\"\"\"\n user = self.get_object()\n profile = user.profile\n partial = request.method == 'PATCH'\n serializer = ProfileModelSerializer(profile, data=request.data, partial=partial)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n data = UserModelSerializer(user).data\n print(data)\n return Response(data)\n", "source": "the_stack_v2_python_sparse", "source_path": "root/users/api/views/users.py", "source_repo": "macknilan/Post-prueba-tecnica-desarrollador-python", "split": "test", "star_events_count": 1} {"blob_id": "e56ef8154e810b318a2c122efcf9df289d6bd2a6", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "class_docstring": "Proto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline.", "class_name": "AssetServiceServicer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AssetServiceServicer:\n \"\"\"Proto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline.\"\"\"\n\n def GetAsset(self, request, context):\n \"\"\"Returns the requested asset in full detail.\"\"\"\n <|body_0|>\n\n def MutateAssets(self, request, context):\n \"\"\"Creates assets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000406", "length_bytes": 5542, "license_type": "permissive", "methods": [{"docstring": "Returns the requested asset in full detail.", "name": "GetAsset", "signature": "def GetAsset(self, request, context)"}, {"docstring": "Creates assets. Operation statuses are returned.", "name": "MutateAssets", "signature": "def MutateAssets(self, request, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_018301", "prompt": "Implement the Python class `AssetServiceServicer` described below.\n\nClass description:\nProto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline.\n\nMethod signatures and docstrings:\n- def GetAsset(self, request, context): Returns the requested asset in full detail.\n- def MutateAssets(self, request, context): Creates assets. Operation statuses are returned.", "prompted_full_text": "Implement the Python class `AssetServiceServicer` described below.\n\nClass description:\nProto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline.\n\nMethod signatures and docstrings:\n- def GetAsset(self, request, context): Returns the requested asset in full detail.\n- def MutateAssets(self, request, context): Creates assets. Operation statuses are returned.\n\n<|skeleton|>\nclass AssetServiceServicer:\n \"\"\"Proto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline.\"\"\"\n\n def GetAsset(self, request, context):\n \"\"\"Returns the requested asset in full detail.\"\"\"\n <|body_0|>\n\n def MutateAssets(self, request, context):\n \"\"\"Creates assets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "revision_id": "a5b6cede64f4d9912ae6ad26927a54e40448c9fe", "skeleton": "<|skeleton|>\nclass AssetServiceServicer:\n \"\"\"Proto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline.\"\"\"\n\n def GetAsset(self, request, context):\n \"\"\"Returns the requested asset in full detail.\"\"\"\n <|body_0|>\n\n def MutateAssets(self, request, context):\n \"\"\"Creates assets. Operation statuses are returned.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AssetServiceServicer:\n \"\"\"Proto file describing the Asset service. Service to manage assets. Asset types can be created with AssetService are YoutubeVideoAsset, MediaBundleAsset and ImageAsset. TextAsset should be created with Ad inline.\"\"\"\n\n def GetAsset(self, request, context):\n \"\"\"Returns the requested asset in full detail.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def MutateAssets(self, request, context):\n \"\"\"Creates assets. Operation statuses are returned.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "google/ads/google_ads/v5/proto/services/asset_service_pb2_grpc.py", "source_repo": "fiboknacky/google-ads-python", "split": "test", "star_events_count": 0} {"blob_id": "b5dc710fb4b08393cf9e5ccf33b65be78212f9ef", "bodies": ["np.random.seed(0)\nself.model = model\nself.d = model.d\nself.num_thetas = num_thetas\nself.num_inputs = num_inputs\nrep_range = np.tile(np.array([num_inputs]), num_thetas)\nparams = np.random.uniform(self.model.thetamin, self.model.thetamax, size=(self.num_thetas, model.d))\nself.params = np.repeat(params, repeats=rep_range, axis=0)\nx = np.random.normal(0, 1, size=(self.num_inputs, self.model.inputsize))\nself.x = np.tile(x, (self.num_thetas, 1))", "grads = self.model.get_gradient(params=self.params, x=self.x)\noutput = self.model.forward(params=self.params, x=self.x)\nfishers = self.model.get_fisher(gradients=grads, model_output=output)\nfisher_trace = np.trace(np.average(fishers, axis=0))\nfisher = np.average(np.reshape(fishers, (self.num_thetas, self.num_inputs, self.d, self.d)), axis=1)\nf_hat = self.d * fisher / fisher_trace\nreturn (f_hat, fisher_trace)", "effective_dim = []\nfor ns in n:\n Fhat = f_hat * ns / (2 * pi * np.log(ns))\n one_plus_F = np.eye(self.d) + Fhat\n det = np.linalg.slogdet(one_plus_F)[1]\n r = det / 2\n effective_dim.append(2 * (logsumexp(r) - np.log(self.num_thetas)) / np.log(ns / (2 * pi * np.log(ns))))\nreturn effective_dim"], "bodies_text": "<|body_start_0|>\n np.random.seed(0)\n self.model = model\n self.d = model.d\n self.num_thetas = num_thetas\n self.num_inputs = num_inputs\n rep_range = np.tile(np.array([num_inputs]), num_thetas)\n params = np.random.uniform(self.model.thetamin, self.model.thetamax, size=(self.num_thetas, model.d))\n self.params = np.repeat(params, repeats=rep_range, axis=0)\n x = np.random.normal(0, 1, size=(self.num_inputs, self.model.inputsize))\n self.x = np.tile(x, (self.num_thetas, 1))\n<|end_body_0|>\n\n<|body_start_1|>\n grads = self.model.get_gradient(params=self.params, x=self.x)\n output = self.model.forward(params=self.params, x=self.x)\n fishers = self.model.get_fisher(gradients=grads, model_output=output)\n fisher_trace = np.trace(np.average(fishers, axis=0))\n fisher = np.average(np.reshape(fishers, (self.num_thetas, self.num_inputs, self.d, self.d)), axis=1)\n f_hat = self.d * fisher / fisher_trace\n return (f_hat, fisher_trace)\n<|end_body_1|>\n\n<|body_start_2|>\n effective_dim = []\n for ns in n:\n Fhat = f_hat * ns / (2 * pi * np.log(ns))\n one_plus_F = np.eye(self.d) + Fhat\n det = np.linalg.slogdet(one_plus_F)[1]\n r = det / 2\n effective_dim.append(2 * (logsumexp(r) - np.log(self.num_thetas)) / np.log(ns / (2 * pi * np.log(ns))))\n return effective_dim\n<|end_body_2|>\n", "class_docstring": "", "class_name": "EffectiveDimension", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EffectiveDimension:\n\n def __init__(self, model, num_thetas, num_inputs):\n \"\"\"Computes the effective dimension for a parameterised model. :param model: class instance (i think that's how you call it?) :param num_thetas: int, number of parameter sets to include :param num_inputs: int, number of input samples to include\"\"\"\n <|body_0|>\n\n def get_fhat(self):\n \"\"\":return: ndarray, f_hat values of size (num_inputs, d, d)\"\"\"\n <|body_1|>\n\n def eff_dim(self, f_hat, n):\n \"\"\"Compute the effective dimension. :param f_hat: ndarray :param n: list, used to represent number of data samples available as per the effective dimension calc :return: list, effective dimension for each n\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n np.random.seed(0)\n self.model = model\n self.d = model.d\n self.num_thetas = num_thetas\n self.num_inputs = num_inputs\n rep_range = np.tile(np.array([num_inputs]), num_thetas)\n params = np.random.uniform(self.model.thetamin, self.model.thetamax, size=(self.num_thetas, model.d))\n self.params = np.repeat(params, repeats=rep_range, axis=0)\n x = np.random.normal(0, 1, size=(self.num_inputs, self.model.inputsize))\n self.x = np.tile(x, (self.num_thetas, 1))\n<|end_body_0|>\n\n<|body_start_1|>\n grads = self.model.get_gradient(params=self.params, x=self.x)\n output = self.model.forward(params=self.params, x=self.x)\n fishers = self.model.get_fisher(gradients=grads, model_output=output)\n fisher_trace = np.trace(np.average(fishers, axis=0))\n fisher = np.average(np.reshape(fishers, (self.num_thetas, self.num_inputs, self.d, self.d)), axis=1)\n f_hat = self.d * fisher / fisher_trace\n return (f_hat, fisher_trace)\n<|end_body_1|>\n\n<|body_start_2|>\n effective_dim = []\n for ns in n:\n Fhat = f_hat * ns / (2 * pi * np.log(ns))\n one_plus_F = np.eye(self.d) + Fhat\n det = np.linalg.slogdet(one_plus_F)[1]\n r = det / 2\n effective_dim.append(2 * (logsumexp(r) - np.log(self.num_thetas)) / np.log(ns / (2 * pi * np.log(ns))))\n return effective_dim\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000407", "length_bytes": 14427, "license_type": "permissive", "methods": [{"docstring": "Computes the effective dimension for a parameterised model. :param model: class instance (i think that's how you call it?) :param num_thetas: int, number of parameter sets to include :param num_inputs: int, number of input samples to include", "name": "__init__", "signature": "def __init__(self, model, num_thetas, num_inputs)"}, {"docstring": ":return: ndarray, f_hat values of size (num_inputs, d, d)", "name": "get_fhat", "signature": "def get_fhat(self)"}, {"docstring": "Compute the effective dimension. :param f_hat: ndarray :param n: list, used to represent number of data samples available as per the effective dimension calc :return: list, effective dimension for each n", "name": "eff_dim", "signature": "def eff_dim(self, f_hat, n)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_034718", "prompt": "Implement the Python class `EffectiveDimension` described below.\n\nClass description:\nImplement the EffectiveDimension class.\n\nMethod signatures and docstrings:\n- def __init__(self, model, num_thetas, num_inputs): Computes the effective dimension for a parameterised model. :param model: class instance (i think that's how you call it?) :param num_thetas: int, number of parameter sets to include :param num_inputs: int, number of input samples to include\n- def get_fhat(self): :return: ndarray, f_hat values of size (num_inputs, d, d)\n- def eff_dim(self, f_hat, n): Compute the effective dimension. :param f_hat: ndarray :param n: list, used to represent number of data samples available as per the effective dimension calc :return: list, effective dimension for each n", "prompted_full_text": "Implement the Python class `EffectiveDimension` described below.\n\nClass description:\nImplement the EffectiveDimension class.\n\nMethod signatures and docstrings:\n- def __init__(self, model, num_thetas, num_inputs): Computes the effective dimension for a parameterised model. :param model: class instance (i think that's how you call it?) :param num_thetas: int, number of parameter sets to include :param num_inputs: int, number of input samples to include\n- def get_fhat(self): :return: ndarray, f_hat values of size (num_inputs, d, d)\n- def eff_dim(self, f_hat, n): Compute the effective dimension. :param f_hat: ndarray :param n: list, used to represent number of data samples available as per the effective dimension calc :return: list, effective dimension for each n\n\n<|skeleton|>\nclass EffectiveDimension:\n\n def __init__(self, model, num_thetas, num_inputs):\n \"\"\"Computes the effective dimension for a parameterised model. :param model: class instance (i think that's how you call it?) :param num_thetas: int, number of parameter sets to include :param num_inputs: int, number of input samples to include\"\"\"\n <|body_0|>\n\n def get_fhat(self):\n \"\"\":return: ndarray, f_hat values of size (num_inputs, d, d)\"\"\"\n <|body_1|>\n\n def eff_dim(self, f_hat, n):\n \"\"\"Compute the effective dimension. :param f_hat: ndarray :param n: list, used to represent number of data samples available as per the effective dimension calc :return: list, effective dimension for each n\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n np.random.seed(0)\n self.model = model\n self.d = model.d\n self.num_thetas = num_thetas\n self.num_inputs = num_inputs\n rep_range = np.tile(np.array([num_inputs]), num_thetas)\n params = np.random.uniform(self.model.thetamin, self.model.thetamax, size=(self.num_thetas, model.d))\n self.params = np.repeat(params, repeats=rep_range, axis=0)\n x = np.random.normal(0, 1, size=(self.num_inputs, self.model.inputsize))\n self.x = np.tile(x, (self.num_thetas, 1))\n<|end_body_0|>\n\n<|body_start_1|>\n grads = self.model.get_gradient(params=self.params, x=self.x)\n output = self.model.forward(params=self.params, x=self.x)\n fishers = self.model.get_fisher(gradients=grads, model_output=output)\n fisher_trace = np.trace(np.average(fishers, axis=0))\n fisher = np.average(np.reshape(fishers, (self.num_thetas, self.num_inputs, self.d, self.d)), axis=1)\n f_hat = self.d * fisher / fisher_trace\n return (f_hat, fisher_trace)\n<|end_body_1|>\n\n<|body_start_2|>\n effective_dim = []\n for ns in n:\n Fhat = f_hat * ns / (2 * pi * np.log(ns))\n one_plus_F = np.eye(self.d) + Fhat\n det = np.linalg.slogdet(one_plus_F)[1]\n r = det / 2\n effective_dim.append(2 * (logsumexp(r) - np.log(self.num_thetas)) / np.log(ns / (2 * pi * np.log(ns))))\n return effective_dim\n<|end_body_2|>\n", "revision_id": "5d9a9b638967bee5ff848c9564f4c90849afc5ca", "skeleton": "<|skeleton|>\nclass EffectiveDimension:\n\n def __init__(self, model, num_thetas, num_inputs):\n \"\"\"Computes the effective dimension for a parameterised model. :param model: class instance (i think that's how you call it?) :param num_thetas: int, number of parameter sets to include :param num_inputs: int, number of input samples to include\"\"\"\n <|body_0|>\n\n def get_fhat(self):\n \"\"\":return: ndarray, f_hat values of size (num_inputs, d, d)\"\"\"\n <|body_1|>\n\n def eff_dim(self, f_hat, n):\n \"\"\"Compute the effective dimension. :param f_hat: ndarray :param n: list, used to represent number of data samples available as per the effective dimension calc :return: list, effective dimension for each n\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EffectiveDimension:\n def __init__(self, model, num_thetas, num_inputs):\n \"\"\"Computes the effective dimension for a parameterised model. :param model: class instance (i think that's how you call it?) :param num_thetas: int, number of parameter sets to include :param num_inputs: int, number of input samples to include\"\"\"\n np.random.seed(0)\n self.model = model\n self.d = model.d\n self.num_thetas = num_thetas\n self.num_inputs = num_inputs\n rep_range = np.tile(np.array([num_inputs]), num_thetas)\n params = np.random.uniform(self.model.thetamin, self.model.thetamax, size=(self.num_thetas, model.d))\n self.params = np.repeat(params, repeats=rep_range, axis=0)\n x = np.random.normal(0, 1, size=(self.num_inputs, self.model.inputsize))\n self.x = np.tile(x, (self.num_thetas, 1))\n\n def get_fhat(self):\n \"\"\":return: ndarray, f_hat values of size (num_inputs, d, d)\"\"\"\n grads = self.model.get_gradient(params=self.params, x=self.x)\n output = self.model.forward(params=self.params, x=self.x)\n fishers = self.model.get_fisher(gradients=grads, model_output=output)\n fisher_trace = np.trace(np.average(fishers, axis=0))\n fisher = np.average(np.reshape(fishers, (self.num_thetas, self.num_inputs, self.d, self.d)), axis=1)\n f_hat = self.d * fisher / fisher_trace\n return (f_hat, fisher_trace)\n\n def eff_dim(self, f_hat, n):\n \"\"\"Compute the effective dimension. :param f_hat: ndarray :param n: list, used to represent number of data samples available as per the effective dimension calc :return: list, effective dimension for each n\"\"\"\n effective_dim = []\n for ns in n:\n Fhat = f_hat * ns / (2 * pi * np.log(ns))\n one_plus_F = np.eye(self.d) + Fhat\n det = np.linalg.slogdet(one_plus_F)[1]\n r = det / 2\n effective_dim.append(2 * (logsumexp(r) - np.log(self.num_thetas)) / np.log(ns / (2 * pi * np.log(ns))))\n return effective_dim\n", "source": "the_stack_v2_python_sparse", "source_path": "Noise_plots/eigenvalue_distribution/generate_data/functions/functions.py", "source_repo": "amyami187/effective_dimension", "split": "test", "star_events_count": 30} {"blob_id": "f7afa6e58023edd9127289644c31de2a9538f499", "bodies": ["super().__init__(config=config, timezone=timezone, logger=logger)\nif not api_url:\n self.api_url = 'https://api.uezo.net/mecab/parse'\n self.logger.warning('Do not use default API URL for the production environment. This is for trial use only. Install MeCab and use MeCabTagger instead.')\nelse:\n self.api_url = api_url", "ret = []\nif not text:\n return ret\ntry:\n parsed_json = requests.post(self.api_url, headers={'content-type': 'application/json'}, json={'text': text}, timeout=10).json()\n ret = [MeCabServiceNode.create(n['surface'], n['features']) for n in parsed_json['nodes']]\nexcept Exception as ex:\n self.logger.error('MeCab Service parsing error: ' + str(ex) + '\\n' + traceback.format_exc())\nreturn ret"], "bodies_text": "<|body_start_0|>\n super().__init__(config=config, timezone=timezone, logger=logger)\n if not api_url:\n self.api_url = 'https://api.uezo.net/mecab/parse'\n self.logger.warning('Do not use default API URL for the production environment. This is for trial use only. Install MeCab and use MeCabTagger instead.')\n else:\n self.api_url = api_url\n<|end_body_0|>\n\n<|body_start_1|>\n ret = []\n if not text:\n return ret\n try:\n parsed_json = requests.post(self.api_url, headers={'content-type': 'application/json'}, json={'text': text}, timeout=10).json()\n ret = [MeCabServiceNode.create(n['surface'], n['features']) for n in parsed_json['nodes']]\n except Exception as ex:\n self.logger.error('MeCab Service parsing error: ' + str(ex) + '\\n' + traceback.format_exc())\n return ret\n<|end_body_1|>\n", "class_docstring": "Tagger using mecab-service Attributes ---------- config : minette.Config Configuration timezone : pytz.timezone Timezone logger : logging.Logger Logger api_url : str URL for MeCabService API", "class_name": "MeCabServiceTagger", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MeCabServiceTagger:\n \"\"\"Tagger using mecab-service Attributes ---------- config : minette.Config Configuration timezone : pytz.timezone Timezone logger : logging.Logger Logger api_url : str URL for MeCabService API\"\"\"\n\n def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs):\n \"\"\"Parameters ---------- config : Config, default None Configuration timezone : timezone, default None Timezone logger : Logger, default None Logger api_url : str, default None URL for MeCabService API. If None trial URL is used.\"\"\"\n <|body_0|>\n\n def parse(self, text):\n \"\"\"Parse and annotate using MeCab Service Parameters ---------- text : str Text to analyze Returns ------- words : list of minette.MeCabServiceNode MeCabService nodes\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config=config, timezone=timezone, logger=logger)\n if not api_url:\n self.api_url = 'https://api.uezo.net/mecab/parse'\n self.logger.warning('Do not use default API URL for the production environment. This is for trial use only. Install MeCab and use MeCabTagger instead.')\n else:\n self.api_url = api_url\n<|end_body_0|>\n\n<|body_start_1|>\n ret = []\n if not text:\n return ret\n try:\n parsed_json = requests.post(self.api_url, headers={'content-type': 'application/json'}, json={'text': text}, timeout=10).json()\n ret = [MeCabServiceNode.create(n['surface'], n['features']) for n in parsed_json['nodes']]\n except Exception as ex:\n self.logger.error('MeCab Service parsing error: ' + str(ex) + '\\n' + traceback.format_exc())\n return ret\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000408", "length_bytes": 3495, "license_type": "permissive", "methods": [{"docstring": "Parameters ---------- config : Config, default None Configuration timezone : timezone, default None Timezone logger : Logger, default None Logger api_url : str, default None URL for MeCabService API. If None trial URL is used.", "name": "__init__", "signature": "def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs)"}, {"docstring": "Parse and annotate using MeCab Service Parameters ---------- text : str Text to analyze Returns ------- words : list of minette.MeCabServiceNode MeCabService nodes", "name": "parse", "signature": "def parse(self, text)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_018726", "prompt": "Implement the Python class `MeCabServiceTagger` described below.\n\nClass description:\nTagger using mecab-service Attributes ---------- config : minette.Config Configuration timezone : pytz.timezone Timezone logger : logging.Logger Logger api_url : str URL for MeCabService API\n\nMethod signatures and docstrings:\n- def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs): Parameters ---------- config : Config, default None Configuration timezone : timezone, default None Timezone logger : Logger, default None Logger api_url : str, default None URL for MeCabService API. If None trial URL is used.\n- def parse(self, text): Parse and annotate using MeCab Service Parameters ---------- text : str Text to analyze Returns ------- words : list of minette.MeCabServiceNode MeCabService nodes", "prompted_full_text": "Implement the Python class `MeCabServiceTagger` described below.\n\nClass description:\nTagger using mecab-service Attributes ---------- config : minette.Config Configuration timezone : pytz.timezone Timezone logger : logging.Logger Logger api_url : str URL for MeCabService API\n\nMethod signatures and docstrings:\n- def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs): Parameters ---------- config : Config, default None Configuration timezone : timezone, default None Timezone logger : Logger, default None Logger api_url : str, default None URL for MeCabService API. If None trial URL is used.\n- def parse(self, text): Parse and annotate using MeCab Service Parameters ---------- text : str Text to analyze Returns ------- words : list of minette.MeCabServiceNode MeCabService nodes\n\n<|skeleton|>\nclass MeCabServiceTagger:\n \"\"\"Tagger using mecab-service Attributes ---------- config : minette.Config Configuration timezone : pytz.timezone Timezone logger : logging.Logger Logger api_url : str URL for MeCabService API\"\"\"\n\n def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs):\n \"\"\"Parameters ---------- config : Config, default None Configuration timezone : timezone, default None Timezone logger : Logger, default None Logger api_url : str, default None URL for MeCabService API. If None trial URL is used.\"\"\"\n <|body_0|>\n\n def parse(self, text):\n \"\"\"Parse and annotate using MeCab Service Parameters ---------- text : str Text to analyze Returns ------- words : list of minette.MeCabServiceNode MeCabService nodes\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(config=config, timezone=timezone, logger=logger)\n if not api_url:\n self.api_url = 'https://api.uezo.net/mecab/parse'\n self.logger.warning('Do not use default API URL for the production environment. This is for trial use only. Install MeCab and use MeCabTagger instead.')\n else:\n self.api_url = api_url\n<|end_body_0|>\n\n<|body_start_1|>\n ret = []\n if not text:\n return ret\n try:\n parsed_json = requests.post(self.api_url, headers={'content-type': 'application/json'}, json={'text': text}, timeout=10).json()\n ret = [MeCabServiceNode.create(n['surface'], n['features']) for n in parsed_json['nodes']]\n except Exception as ex:\n self.logger.error('MeCab Service parsing error: ' + str(ex) + '\\n' + traceback.format_exc())\n return ret\n<|end_body_1|>\n", "revision_id": "dd8cd7d244b6e6e4133c8e73d637ded8a8c6846f", "skeleton": "<|skeleton|>\nclass MeCabServiceTagger:\n \"\"\"Tagger using mecab-service Attributes ---------- config : minette.Config Configuration timezone : pytz.timezone Timezone logger : logging.Logger Logger api_url : str URL for MeCabService API\"\"\"\n\n def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs):\n \"\"\"Parameters ---------- config : Config, default None Configuration timezone : timezone, default None Timezone logger : Logger, default None Logger api_url : str, default None URL for MeCabService API. If None trial URL is used.\"\"\"\n <|body_0|>\n\n def parse(self, text):\n \"\"\"Parse and annotate using MeCab Service Parameters ---------- text : str Text to analyze Returns ------- words : list of minette.MeCabServiceNode MeCabService nodes\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MeCabServiceTagger:\n \"\"\"Tagger using mecab-service Attributes ---------- config : minette.Config Configuration timezone : pytz.timezone Timezone logger : logging.Logger Logger api_url : str URL for MeCabService API\"\"\"\n\n def __init__(self, config=None, timezone=None, logger=None, *, api_url=None, **kwargs):\n \"\"\"Parameters ---------- config : Config, default None Configuration timezone : timezone, default None Timezone logger : Logger, default None Logger api_url : str, default None URL for MeCabService API. If None trial URL is used.\"\"\"\n super().__init__(config=config, timezone=timezone, logger=logger)\n if not api_url:\n self.api_url = 'https://api.uezo.net/mecab/parse'\n self.logger.warning('Do not use default API URL for the production environment. This is for trial use only. Install MeCab and use MeCabTagger instead.')\n else:\n self.api_url = api_url\n\n def parse(self, text):\n \"\"\"Parse and annotate using MeCab Service Parameters ---------- text : str Text to analyze Returns ------- words : list of minette.MeCabServiceNode MeCabService nodes\"\"\"\n ret = []\n if not text:\n return ret\n try:\n parsed_json = requests.post(self.api_url, headers={'content-type': 'application/json'}, json={'text': text}, timeout=10).json()\n ret = [MeCabServiceNode.create(n['surface'], n['features']) for n in parsed_json['nodes']]\n except Exception as ex:\n self.logger.error('MeCab Service parsing error: ' + str(ex) + '\\n' + traceback.format_exc())\n return ret\n", "source": "the_stack_v2_python_sparse", "source_path": "minette/tagger/mecabservice.py", "source_repo": "uezo/minette-python", "split": "test", "star_events_count": 33} {"blob_id": "df6aaae80a22ca5cecb45e8c9f45398dd4200a03", "bodies": ["if walk._next is None:\n return (walk, walk)\nelse:\n head, tail = self._recursive_reverse(walk._next)\n tail._next = walk\n walk._next = None\n return (head, walk)", "if self.is_empty():\n return\nhead, tail = self._recursive_reverse(self._head)\nself._head = head\nself._tail = tail", "if self._size <= 1:\n return\nhead = self._head\npre_walk = self._head._next\nwalk = pre_walk._next\nhead._next = None\nwhile walk is not None:\n pre_walk._next = head\n head = pre_walk\n pre_walk = walk\n walk = walk._next\npre_walk._next = head\nhead = pre_walk\nself._tail = self._head\nself._head = head"], "bodies_text": "<|body_start_0|>\n if walk._next is None:\n return (walk, walk)\n else:\n head, tail = self._recursive_reverse(walk._next)\n tail._next = walk\n walk._next = None\n return (head, walk)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.is_empty():\n return\n head, tail = self._recursive_reverse(self._head)\n self._head = head\n self._tail = tail\n<|end_body_1|>\n\n<|body_start_2|>\n if self._size <= 1:\n return\n head = self._head\n pre_walk = self._head._next\n walk = pre_walk._next\n head._next = None\n while walk is not None:\n pre_walk._next = head\n head = pre_walk\n pre_walk = walk\n walk = walk._next\n pre_walk._next = head\n head = pre_walk\n self._tail = self._head\n self._head = head\n<|end_body_2|>\n", "class_docstring": "", "class_name": "SList", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SList:\n\n def _recursive_reverse(self, walk):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n <|body_0|>\n\n def recursive_reverse(self):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n <|body_1|>\n\n def iterate_reverse(self):\n \"\"\"Reverse the singly linked list by iterating.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if walk._next is None:\n return (walk, walk)\n else:\n head, tail = self._recursive_reverse(walk._next)\n tail._next = walk\n walk._next = None\n return (head, walk)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.is_empty():\n return\n head, tail = self._recursive_reverse(self._head)\n self._head = head\n self._tail = tail\n<|end_body_1|>\n\n<|body_start_2|>\n if self._size <= 1:\n return\n head = self._head\n pre_walk = self._head._next\n walk = pre_walk._next\n head._next = None\n while walk is not None:\n pre_walk._next = head\n head = pre_walk\n pre_walk = walk\n walk = walk._next\n pre_walk._next = head\n head = pre_walk\n self._tail = self._head\n self._head = head\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000409", "length_bytes": 1487, "license_type": "no_license", "methods": [{"docstring": "Reverse the singly linked list by recursiving.", "name": "_recursive_reverse", "signature": "def _recursive_reverse(self, walk)"}, {"docstring": "Reverse the singly linked list by recursiving.", "name": "recursive_reverse", "signature": "def recursive_reverse(self)"}, {"docstring": "Reverse the singly linked list by iterating.", "name": "iterate_reverse", "signature": "def iterate_reverse(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_002336", "prompt": "Implement the Python class `SList` described below.\n\nClass description:\nImplement the SList class.\n\nMethod signatures and docstrings:\n- def _recursive_reverse(self, walk): Reverse the singly linked list by recursiving.\n- def recursive_reverse(self): Reverse the singly linked list by recursiving.\n- def iterate_reverse(self): Reverse the singly linked list by iterating.", "prompted_full_text": "Implement the Python class `SList` described below.\n\nClass description:\nImplement the SList class.\n\nMethod signatures and docstrings:\n- def _recursive_reverse(self, walk): Reverse the singly linked list by recursiving.\n- def recursive_reverse(self): Reverse the singly linked list by recursiving.\n- def iterate_reverse(self): Reverse the singly linked list by iterating.\n\n<|skeleton|>\nclass SList:\n\n def _recursive_reverse(self, walk):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n <|body_0|>\n\n def recursive_reverse(self):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n <|body_1|>\n\n def iterate_reverse(self):\n \"\"\"Reverse the singly linked list by iterating.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if walk._next is None:\n return (walk, walk)\n else:\n head, tail = self._recursive_reverse(walk._next)\n tail._next = walk\n walk._next = None\n return (head, walk)\n<|end_body_0|>\n\n<|body_start_1|>\n if self.is_empty():\n return\n head, tail = self._recursive_reverse(self._head)\n self._head = head\n self._tail = tail\n<|end_body_1|>\n\n<|body_start_2|>\n if self._size <= 1:\n return\n head = self._head\n pre_walk = self._head._next\n walk = pre_walk._next\n head._next = None\n while walk is not None:\n pre_walk._next = head\n head = pre_walk\n pre_walk = walk\n walk = walk._next\n pre_walk._next = head\n head = pre_walk\n self._tail = self._head\n self._head = head\n<|end_body_2|>\n", "revision_id": "70b23ead7a89e46a84d9d914e7c8fa678edd1f90", "skeleton": "<|skeleton|>\nclass SList:\n\n def _recursive_reverse(self, walk):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n <|body_0|>\n\n def recursive_reverse(self):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n <|body_1|>\n\n def iterate_reverse(self):\n \"\"\"Reverse the singly linked list by iterating.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SList:\n def _recursive_reverse(self, walk):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n if walk._next is None:\n return (walk, walk)\n else:\n head, tail = self._recursive_reverse(walk._next)\n tail._next = walk\n walk._next = None\n return (head, walk)\n\n def recursive_reverse(self):\n \"\"\"Reverse the singly linked list by recursiving.\"\"\"\n if self.is_empty():\n return\n head, tail = self._recursive_reverse(self._head)\n self._head = head\n self._tail = tail\n\n def iterate_reverse(self):\n \"\"\"Reverse the singly linked list by iterating.\"\"\"\n if self._size <= 1:\n return\n head = self._head\n pre_walk = self._head._next\n walk = pre_walk._next\n head._next = None\n while walk is not None:\n pre_walk._next = head\n head = pre_walk\n pre_walk = walk\n walk = walk._next\n pre_walk._next = head\n head = pre_walk\n self._tail = self._head\n self._head = head\n", "source": "the_stack_v2_python_sparse", "source_path": "linded_list_ch07/creativity/reverse_singly_list_c7_29.py", "source_repo": "wanyikang/dsap", "split": "test", "star_events_count": 1} {"blob_id": "d97991577af3fa405d7995ae49df4886b0476482", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "class_docstring": "Missing associated documentation comment in .proto file", "class_name": "AuthenticationServicer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AuthenticationServicer:\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n\n def authenticate(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n <|body_0|>\n\n def validateToken(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000410", "length_bytes": 7766, "license_type": "no_license", "methods": [{"docstring": "Missing associated documentation comment in .proto file", "name": "authenticate", "signature": "def authenticate(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file", "name": "validateToken", "signature": "def validateToken(self, request, context)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_051856", "prompt": "Implement the Python class `AuthenticationServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file\n\nMethod signatures and docstrings:\n- def authenticate(self, request, context): Missing associated documentation comment in .proto file\n- def validateToken(self, request, context): Missing associated documentation comment in .proto file", "prompted_full_text": "Implement the Python class `AuthenticationServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file\n\nMethod signatures and docstrings:\n- def authenticate(self, request, context): Missing associated documentation comment in .proto file\n- def validateToken(self, request, context): Missing associated documentation comment in .proto file\n\n<|skeleton|>\nclass AuthenticationServicer:\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n\n def authenticate(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n <|body_0|>\n\n def validateToken(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n", "revision_id": "626dae0efa20a66d1f69f49be15ab90c623ec33b", "skeleton": "<|skeleton|>\nclass AuthenticationServicer:\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n\n def authenticate(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n <|body_0|>\n\n def validateToken(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AuthenticationServicer:\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n\n def authenticate(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def validateToken(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "svms-job-module-service/backup/connectors/vms_profile_manager/protoc/profile_manager_pb2_grpc.py", "source_repo": "shankarmahato/Job_module", "split": "test", "star_events_count": 0} {"blob_id": "d2dbe34afa1cf3fb72d70ea5bd4212d0c3aeb9d4", "bodies": ["params = get_params(locals())\nraw_result = await self.api_request('get', params)\nif return_raw_response:\n return raw_result\nresult = StorageGetResponse(**raw_result)\nreturn result", "params = get_params(locals())\nraw_result = await self.api_request('getKeys', params)\nif return_raw_response:\n return raw_result\nresult = StorageGetKeysResponse(**raw_result)\nreturn result", "params = get_params(locals())\nraw_result = await self.api_request('set', params)\nif return_raw_response:\n return raw_result\nresult = BaseOkResponse(**raw_result)\nreturn result"], "bodies_text": "<|body_start_0|>\n params = get_params(locals())\n raw_result = await self.api_request('get', params)\n if return_raw_response:\n return raw_result\n result = StorageGetResponse(**raw_result)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n params = get_params(locals())\n raw_result = await self.api_request('getKeys', params)\n if return_raw_response:\n return raw_result\n result = StorageGetKeysResponse(**raw_result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n params = get_params(locals())\n raw_result = await self.api_request('set', params)\n if return_raw_response:\n return raw_result\n result = BaseOkResponse(**raw_result)\n return result\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Storage", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Storage:\n\n async def get(self, return_raw_response: bool=False, key: typing.Optional[str]=None, keys: typing.Optional[typing.List[str]]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, StorageGetResponse]:\n \"\"\":param key: :param keys: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_0|>\n\n async def get_keys(self, return_raw_response: bool=False, user_id: typing.Optional[int]=None, offset: typing.Optional[int]=None, count: typing.Optional[int]=None) -> typing.Union[dict, StorageGetKeysResponse]:\n \"\"\":param user_id: - user id, whose variables names are returned if they were requested with a server method. :param offset: :param count: - amount of variable names the info needs to be collected from. :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_1|>\n\n async def set(self, key: str, return_raw_response: bool=False, value: typing.Optional[str]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, BaseOkResponse]:\n \"\"\":param key: :param value: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n params = get_params(locals())\n raw_result = await self.api_request('get', params)\n if return_raw_response:\n return raw_result\n result = StorageGetResponse(**raw_result)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n params = get_params(locals())\n raw_result = await self.api_request('getKeys', params)\n if return_raw_response:\n return raw_result\n result = StorageGetKeysResponse(**raw_result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n params = get_params(locals())\n raw_result = await self.api_request('set', params)\n if return_raw_response:\n return raw_result\n result = BaseOkResponse(**raw_result)\n return result\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000411", "length_bytes": 2260, "license_type": "permissive", "methods": [{"docstring": ":param key: :param keys: :param user_id: :param return_raw_response: - return result at dict :return:", "name": "get", "signature": "async def get(self, return_raw_response: bool=False, key: typing.Optional[str]=None, keys: typing.Optional[typing.List[str]]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, StorageGetResponse]"}, {"docstring": ":param user_id: - user id, whose variables names are returned if they were requested with a server method. :param offset: :param count: - amount of variable names the info needs to be collected from. :param return_raw_response: - return result at dict :return:", "name": "get_keys", "signature": "async def get_keys(self, return_raw_response: bool=False, user_id: typing.Optional[int]=None, offset: typing.Optional[int]=None, count: typing.Optional[int]=None) -> typing.Union[dict, StorageGetKeysResponse]"}, {"docstring": ":param key: :param value: :param user_id: :param return_raw_response: - return result at dict :return:", "name": "set", "signature": "async def set(self, key: str, return_raw_response: bool=False, value: typing.Optional[str]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, BaseOkResponse]"}], "n_methods": 3, "prompt": "Implement the Python class `Storage` described below.\n\nClass description:\nImplement the Storage class.\n\nMethod signatures and docstrings:\n- async def get(self, return_raw_response: bool=False, key: typing.Optional[str]=None, keys: typing.Optional[typing.List[str]]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, StorageGetResponse]: :param key: :param keys: :param user_id: :param return_raw_response: - return result at dict :return:\n- async def get_keys(self, return_raw_response: bool=False, user_id: typing.Optional[int]=None, offset: typing.Optional[int]=None, count: typing.Optional[int]=None) -> typing.Union[dict, StorageGetKeysResponse]: :param user_id: - user id, whose variables names are returned if they were requested with a server method. :param offset: :param count: - amount of variable names the info needs to be collected from. :param return_raw_response: - return result at dict :return:\n- async def set(self, key: str, return_raw_response: bool=False, value: typing.Optional[str]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, BaseOkResponse]: :param key: :param value: :param user_id: :param return_raw_response: - return result at dict :return:", "prompted_full_text": "Implement the Python class `Storage` described below.\n\nClass description:\nImplement the Storage class.\n\nMethod signatures and docstrings:\n- async def get(self, return_raw_response: bool=False, key: typing.Optional[str]=None, keys: typing.Optional[typing.List[str]]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, StorageGetResponse]: :param key: :param keys: :param user_id: :param return_raw_response: - return result at dict :return:\n- async def get_keys(self, return_raw_response: bool=False, user_id: typing.Optional[int]=None, offset: typing.Optional[int]=None, count: typing.Optional[int]=None) -> typing.Union[dict, StorageGetKeysResponse]: :param user_id: - user id, whose variables names are returned if they were requested with a server method. :param offset: :param count: - amount of variable names the info needs to be collected from. :param return_raw_response: - return result at dict :return:\n- async def set(self, key: str, return_raw_response: bool=False, value: typing.Optional[str]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, BaseOkResponse]: :param key: :param value: :param user_id: :param return_raw_response: - return result at dict :return:\n\n<|skeleton|>\nclass Storage:\n\n async def get(self, return_raw_response: bool=False, key: typing.Optional[str]=None, keys: typing.Optional[typing.List[str]]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, StorageGetResponse]:\n \"\"\":param key: :param keys: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_0|>\n\n async def get_keys(self, return_raw_response: bool=False, user_id: typing.Optional[int]=None, offset: typing.Optional[int]=None, count: typing.Optional[int]=None) -> typing.Union[dict, StorageGetKeysResponse]:\n \"\"\":param user_id: - user id, whose variables names are returned if they were requested with a server method. :param offset: :param count: - amount of variable names the info needs to be collected from. :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_1|>\n\n async def set(self, key: str, return_raw_response: bool=False, value: typing.Optional[str]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, BaseOkResponse]:\n \"\"\":param key: :param value: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n params = get_params(locals())\n raw_result = await self.api_request('get', params)\n if return_raw_response:\n return raw_result\n result = StorageGetResponse(**raw_result)\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n params = get_params(locals())\n raw_result = await self.api_request('getKeys', params)\n if return_raw_response:\n return raw_result\n result = StorageGetKeysResponse(**raw_result)\n return result\n<|end_body_1|>\n\n<|body_start_2|>\n params = get_params(locals())\n raw_result = await self.api_request('set', params)\n if return_raw_response:\n return raw_result\n result = BaseOkResponse(**raw_result)\n return result\n<|end_body_2|>\n", "revision_id": "d88311a680e52faf04f3a18f9c5b381ee9e94a8f", "skeleton": "<|skeleton|>\nclass Storage:\n\n async def get(self, return_raw_response: bool=False, key: typing.Optional[str]=None, keys: typing.Optional[typing.List[str]]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, StorageGetResponse]:\n \"\"\":param key: :param keys: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_0|>\n\n async def get_keys(self, return_raw_response: bool=False, user_id: typing.Optional[int]=None, offset: typing.Optional[int]=None, count: typing.Optional[int]=None) -> typing.Union[dict, StorageGetKeysResponse]:\n \"\"\":param user_id: - user id, whose variables names are returned if they were requested with a server method. :param offset: :param count: - amount of variable names the info needs to be collected from. :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_1|>\n\n async def set(self, key: str, return_raw_response: bool=False, value: typing.Optional[str]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, BaseOkResponse]:\n \"\"\":param key: :param value: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Storage:\n async def get(self, return_raw_response: bool=False, key: typing.Optional[str]=None, keys: typing.Optional[typing.List[str]]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, StorageGetResponse]:\n \"\"\":param key: :param keys: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n params = get_params(locals())\n raw_result = await self.api_request('get', params)\n if return_raw_response:\n return raw_result\n result = StorageGetResponse(**raw_result)\n return result\n\n async def get_keys(self, return_raw_response: bool=False, user_id: typing.Optional[int]=None, offset: typing.Optional[int]=None, count: typing.Optional[int]=None) -> typing.Union[dict, StorageGetKeysResponse]:\n \"\"\":param user_id: - user id, whose variables names are returned if they were requested with a server method. :param offset: :param count: - amount of variable names the info needs to be collected from. :param return_raw_response: - return result at dict :return:\"\"\"\n params = get_params(locals())\n raw_result = await self.api_request('getKeys', params)\n if return_raw_response:\n return raw_result\n result = StorageGetKeysResponse(**raw_result)\n return result\n\n async def set(self, key: str, return_raw_response: bool=False, value: typing.Optional[str]=None, user_id: typing.Optional[int]=None) -> typing.Union[dict, BaseOkResponse]:\n \"\"\":param key: :param value: :param user_id: :param return_raw_response: - return result at dict :return:\"\"\"\n params = get_params(locals())\n raw_result = await self.api_request('set', params)\n if return_raw_response:\n return raw_result\n result = BaseOkResponse(**raw_result)\n return result\n", "source": "the_stack_v2_python_sparse", "source_path": "vkwave/api/methods/storage.py", "source_repo": "prog1ckg/vkwave", "split": "test", "star_events_count": 0} {"blob_id": "f283e03baec31c90a4d5155e7093ac57af448ea3", "bodies": ["if subsets is not None:\n for ss in subsets:\n self[ss] = set()", "if key not in self:\n self[key] = set()\nself[key].add(val)"], "bodies_text": "<|body_start_0|>\n if subsets is not None:\n for ss in subsets:\n self[ss] = set()\n<|end_body_0|>\n\n<|body_start_1|>\n if key not in self:\n self[key] = set()\n self[key].add(val)\n<|end_body_1|>\n", "class_docstring": "Dictionary of sets", "class_name": "SetDict", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SetDict:\n \"\"\"Dictionary of sets\"\"\"\n\n def __init__(self, subsets=None):\n \"\"\"sets can be pre-defined to allow for empty sets or shared subsets objects\"\"\"\n <|body_0|>\n\n def add(self, key, val):\n \"\"\"add a value to a set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if subsets is not None:\n for ss in subsets:\n self[ss] = set()\n<|end_body_0|>\n\n<|body_start_1|>\n if key not in self:\n self[key] = set()\n self[key].add(val)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000412", "length_bytes": 5202, "license_type": "permissive", "methods": [{"docstring": "sets can be pre-defined to allow for empty sets or shared subsets objects", "name": "__init__", "signature": "def __init__(self, subsets=None)"}, {"docstring": "add a value to a set", "name": "add", "signature": "def add(self, key, val)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027869", "prompt": "Implement the Python class `SetDict` described below.\n\nClass description:\nDictionary of sets\n\nMethod signatures and docstrings:\n- def __init__(self, subsets=None): sets can be pre-defined to allow for empty sets or shared subsets objects\n- def add(self, key, val): add a value to a set", "prompted_full_text": "Implement the Python class `SetDict` described below.\n\nClass description:\nDictionary of sets\n\nMethod signatures and docstrings:\n- def __init__(self, subsets=None): sets can be pre-defined to allow for empty sets or shared subsets objects\n- def add(self, key, val): add a value to a set\n\n<|skeleton|>\nclass SetDict:\n \"\"\"Dictionary of sets\"\"\"\n\n def __init__(self, subsets=None):\n \"\"\"sets can be pre-defined to allow for empty sets or shared subsets objects\"\"\"\n <|body_0|>\n\n def add(self, key, val):\n \"\"\"add a value to a set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if subsets is not None:\n for ss in subsets:\n self[ss] = set()\n<|end_body_0|>\n\n<|body_start_1|>\n if key not in self:\n self[key] = set()\n self[key].add(val)\n<|end_body_1|>\n", "revision_id": "163736e360a7d49901fe42f4e9bd10faf708b69d", "skeleton": "<|skeleton|>\nclass SetDict:\n \"\"\"Dictionary of sets\"\"\"\n\n def __init__(self, subsets=None):\n \"\"\"sets can be pre-defined to allow for empty sets or shared subsets objects\"\"\"\n <|body_0|>\n\n def add(self, key, val):\n \"\"\"add a value to a set\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SetDict:\n \"\"\"Dictionary of sets\"\"\"\n\n def __init__(self, subsets=None):\n \"\"\"sets can be pre-defined to allow for empty sets or shared subsets objects\"\"\"\n if subsets is not None:\n for ss in subsets:\n self[ss] = set()\n\n def add(self, key, val):\n \"\"\"add a value to a set\"\"\"\n if key not in self:\n self[key] = set()\n self[key].add(val)\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/pycbio/stats/venn.py", "source_repo": "diekhans/pycbio", "split": "test", "star_events_count": 2} {"blob_id": "557d6e74e0faeb90155f57f9302edd20b5fa33e8", "bodies": ["response = requests.get(cls.COURSE_URL + '-')\ncourses = response.json()['course']\nfor course in courses:\n course_code = course['code'].upper()\n if course_code in skip:\n continue\n response = requests.get(cls.COURSE_URL + course_code)\n course_info = response.json()['course']\n yield {'course_code': course_code, 'full_name': course_info['name'], 'homepage': cls.course_homepage(course_info)}", "info_types = course.get('infoType')\nif not info_types:\n return ''\nfor info in info_types:\n if info['code'] == 'E-URL' and 'text' in info:\n return info['text'] or ''\nreturn ''"], "bodies_text": "<|body_start_0|>\n response = requests.get(cls.COURSE_URL + '-')\n courses = response.json()['course']\n for course in courses:\n course_code = course['code'].upper()\n if course_code in skip:\n continue\n response = requests.get(cls.COURSE_URL + course_code)\n course_info = response.json()['course']\n yield {'course_code': course_code, 'full_name': course_info['name'], 'homepage': cls.course_homepage(course_info)}\n<|end_body_0|>\n\n<|body_start_1|>\n info_types = course.get('infoType')\n if not info_types:\n return ''\n for info in info_types:\n if info['code'] == 'E-URL' and 'text' in info:\n return info['text'] or ''\n return ''\n<|end_body_1|>\n", "class_docstring": "Class for interacting with the NTNU-IME API.", "class_name": "IMEAPI", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass IMEAPI:\n \"\"\"Class for interacting with the NTNU-IME API.\"\"\"\n\n def all_courses(cls, skip: Set[str]):\n \"\"\"Yield all courses available from the IME API. :param skip: List of course codes which should not be yielded.\"\"\"\n <|body_0|>\n\n def course_homepage(course):\n \"\"\"Retrieve course homepage if present in Course API response.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = requests.get(cls.COURSE_URL + '-')\n courses = response.json()['course']\n for course in courses:\n course_code = course['code'].upper()\n if course_code in skip:\n continue\n response = requests.get(cls.COURSE_URL + course_code)\n course_info = response.json()['course']\n yield {'course_code': course_code, 'full_name': course_info['name'], 'homepage': cls.course_homepage(course_info)}\n<|end_body_0|>\n\n<|body_start_1|>\n info_types = course.get('infoType')\n if not info_types:\n return ''\n for info in info_types:\n if info['code'] == 'E-URL' and 'text' in info:\n return info['text'] or ''\n return ''\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000413", "length_bytes": 2132, "license_type": "permissive", "methods": [{"docstring": "Yield all courses available from the IME API. :param skip: List of course codes which should not be yielded.", "name": "all_courses", "signature": "def all_courses(cls, skip: Set[str])"}, {"docstring": "Retrieve course homepage if present in Course API response.", "name": "course_homepage", "signature": "def course_homepage(course)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041245", "prompt": "Implement the Python class `IMEAPI` described below.\n\nClass description:\nClass for interacting with the NTNU-IME API.\n\nMethod signatures and docstrings:\n- def all_courses(cls, skip: Set[str]): Yield all courses available from the IME API. :param skip: List of course codes which should not be yielded.\n- def course_homepage(course): Retrieve course homepage if present in Course API response.", "prompted_full_text": "Implement the Python class `IMEAPI` described below.\n\nClass description:\nClass for interacting with the NTNU-IME API.\n\nMethod signatures and docstrings:\n- def all_courses(cls, skip: Set[str]): Yield all courses available from the IME API. :param skip: List of course codes which should not be yielded.\n- def course_homepage(course): Retrieve course homepage if present in Course API response.\n\n<|skeleton|>\nclass IMEAPI:\n \"\"\"Class for interacting with the NTNU-IME API.\"\"\"\n\n def all_courses(cls, skip: Set[str]):\n \"\"\"Yield all courses available from the IME API. :param skip: List of course codes which should not be yielded.\"\"\"\n <|body_0|>\n\n def course_homepage(course):\n \"\"\"Retrieve course homepage if present in Course API response.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n response = requests.get(cls.COURSE_URL + '-')\n courses = response.json()['course']\n for course in courses:\n course_code = course['code'].upper()\n if course_code in skip:\n continue\n response = requests.get(cls.COURSE_URL + course_code)\n course_info = response.json()['course']\n yield {'course_code': course_code, 'full_name': course_info['name'], 'homepage': cls.course_homepage(course_info)}\n<|end_body_0|>\n\n<|body_start_1|>\n info_types = course.get('infoType')\n if not info_types:\n return ''\n for info in info_types:\n if info['code'] == 'E-URL' and 'text' in info:\n return info['text'] or ''\n return ''\n<|end_body_1|>\n", "revision_id": "5743b1d4c3fefa66fcaa4d283436d2a3f0490604", "skeleton": "<|skeleton|>\nclass IMEAPI:\n \"\"\"Class for interacting with the NTNU-IME API.\"\"\"\n\n def all_courses(cls, skip: Set[str]):\n \"\"\"Yield all courses available from the IME API. :param skip: List of course codes which should not be yielded.\"\"\"\n <|body_0|>\n\n def course_homepage(course):\n \"\"\"Retrieve course homepage if present in Course API response.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class IMEAPI:\n \"\"\"Class for interacting with the NTNU-IME API.\"\"\"\n\n def all_courses(cls, skip: Set[str]):\n \"\"\"Yield all courses available from the IME API. :param skip: List of course codes which should not be yielded.\"\"\"\n response = requests.get(cls.COURSE_URL + '-')\n courses = response.json()['course']\n for course in courses:\n course_code = course['code'].upper()\n if course_code in skip:\n continue\n response = requests.get(cls.COURSE_URL + course_code)\n course_info = response.json()['course']\n yield {'course_code': course_code, 'full_name': course_info['name'], 'homepage': cls.course_homepage(course_info)}\n\n def course_homepage(course):\n \"\"\"Retrieve course homepage if present in Course API response.\"\"\"\n info_types = course.get('infoType')\n if not info_types:\n return ''\n for info in info_types:\n if info['code'] == 'E-URL' and 'text' in info:\n return info['text'] or ''\n return ''\n", "source": "the_stack_v2_python_sparse", "source_path": "semesterpage/management/commands/populate_courses.py", "source_repo": "JakobGM/WikiLinks", "split": "test", "star_events_count": 7} {"blob_id": "73d3d2465bc2e4e6d86d2cb99dcda7f206940575", "bodies": ["self._db = 'strategy'\nself._collection = 'asset_snapshot'\nsuper(AssetSnapshotData, self).__init__(self._db, self._collection)", "d = {'platform': platform, 'account': account}\nfor key, value in asset.items():\n d[key] = value\nasset_id = await self.insert(d)\nreturn asset_id", "if not end:\n end = tools.get_cur_timestamp()\nif not start:\n start = end - 60 * 60 * 24\nspec = {'platform': platform, 'account': account, 'create_time': {'$gte': start, '$lte': end}}\nfields = {'platform': 0, 'account': 0, 'update_time': 0}\ndatas = await self.get_list(spec, fields=fields)\nreturn datas", "spec = {'platform': platform, 'account': account}\n_sort = [('update_time', -1)]\nasset = await self.find_one(spec, sort=_sort)\nif asset:\n del asset['_id']\nreturn asset"], "bodies_text": "<|body_start_0|>\n self._db = 'strategy'\n self._collection = 'asset_snapshot'\n super(AssetSnapshotData, self).__init__(self._db, self._collection)\n<|end_body_0|>\n\n<|body_start_1|>\n d = {'platform': platform, 'account': account}\n for key, value in asset.items():\n d[key] = value\n asset_id = await self.insert(d)\n return asset_id\n<|end_body_1|>\n\n<|body_start_2|>\n if not end:\n end = tools.get_cur_timestamp()\n if not start:\n start = end - 60 * 60 * 24\n spec = {'platform': platform, 'account': account, 'create_time': {'$gte': start, '$lte': end}}\n fields = {'platform': 0, 'account': 0, 'update_time': 0}\n datas = await self.get_list(spec, fields=fields)\n return datas\n<|end_body_2|>\n\n<|body_start_3|>\n spec = {'platform': platform, 'account': account}\n _sort = [('update_time', -1)]\n asset = await self.find_one(spec, sort=_sort)\n if asset:\n del asset['_id']\n return asset\n<|end_body_3|>\n", "class_docstring": "资产数据快照存储 每隔一个小时,从 strategy.asset 表中,创建一次快照数据 资产数据结构: {}", "class_name": "AssetSnapshotData", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AssetSnapshotData:\n \"\"\"资产数据快照存储 每隔一个小时,从 strategy.asset 表中,创建一次快照数据 资产数据结构: {}\"\"\"\n\n def __init__(self):\n \"\"\"初始化\"\"\"\n <|body_0|>\n\n async def create_new_asset(self, platform, account, asset):\n \"\"\"创建新的资产信息 @param platform 交易平台 @param account 账户 @param asset 资产详情\"\"\"\n <|body_1|>\n\n async def get_asset_snapshot(self, platform, account, start=None, end=None):\n \"\"\"获取资产快照 @param platform 交易平台 @param account 账户 @param start 开始时间戳(秒) @param end 结束时间戳(秒)\"\"\"\n <|body_2|>\n\n async def get_latest_asset_snapshot(self, platform, account):\n \"\"\"查询最新的资产快照 @param platform 交易平台 @param account 账户\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._db = 'strategy'\n self._collection = 'asset_snapshot'\n super(AssetSnapshotData, self).__init__(self._db, self._collection)\n<|end_body_0|>\n\n<|body_start_1|>\n d = {'platform': platform, 'account': account}\n for key, value in asset.items():\n d[key] = value\n asset_id = await self.insert(d)\n return asset_id\n<|end_body_1|>\n\n<|body_start_2|>\n if not end:\n end = tools.get_cur_timestamp()\n if not start:\n start = end - 60 * 60 * 24\n spec = {'platform': platform, 'account': account, 'create_time': {'$gte': start, '$lte': end}}\n fields = {'platform': 0, 'account': 0, 'update_time': 0}\n datas = await self.get_list(spec, fields=fields)\n return datas\n<|end_body_2|>\n\n<|body_start_3|>\n spec = {'platform': platform, 'account': account}\n _sort = [('update_time', -1)]\n asset = await self.find_one(spec, sort=_sort)\n if asset:\n del asset['_id']\n return asset\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000414", "length_bytes": 12604, "license_type": "permissive", "methods": [{"docstring": "初始化", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "创建新的资产信息 @param platform 交易平台 @param account 账户 @param asset 资产详情", "name": "create_new_asset", "signature": "async def create_new_asset(self, platform, account, asset)"}, {"docstring": "获取资产快照 @param platform 交易平台 @param account 账户 @param start 开始时间戳(秒) @param end 结束时间戳(秒)", "name": "get_asset_snapshot", "signature": "async def get_asset_snapshot(self, platform, account, start=None, end=None)"}, {"docstring": "查询最新的资产快照 @param platform 交易平台 @param account 账户", "name": "get_latest_asset_snapshot", "signature": "async def get_latest_asset_snapshot(self, platform, account)"}], "n_methods": 4, "prompt": "Implement the Python class `AssetSnapshotData` described below.\n\nClass description:\n资产数据快照存储 每隔一个小时,从 strategy.asset 表中,创建一次快照数据 资产数据结构: {}\n\nMethod signatures and docstrings:\n- def __init__(self): 初始化\n- async def create_new_asset(self, platform, account, asset): 创建新的资产信息 @param platform 交易平台 @param account 账户 @param asset 资产详情\n- async def get_asset_snapshot(self, platform, account, start=None, end=None): 获取资产快照 @param platform 交易平台 @param account 账户 @param start 开始时间戳(秒) @param end 结束时间戳(秒)\n- async def get_latest_asset_snapshot(self, platform, account): 查询最新的资产快照 @param platform 交易平台 @param account 账户", "prompted_full_text": "Implement the Python class `AssetSnapshotData` described below.\n\nClass description:\n资产数据快照存储 每隔一个小时,从 strategy.asset 表中,创建一次快照数据 资产数据结构: {}\n\nMethod signatures and docstrings:\n- def __init__(self): 初始化\n- async def create_new_asset(self, platform, account, asset): 创建新的资产信息 @param platform 交易平台 @param account 账户 @param asset 资产详情\n- async def get_asset_snapshot(self, platform, account, start=None, end=None): 获取资产快照 @param platform 交易平台 @param account 账户 @param start 开始时间戳(秒) @param end 结束时间戳(秒)\n- async def get_latest_asset_snapshot(self, platform, account): 查询最新的资产快照 @param platform 交易平台 @param account 账户\n\n<|skeleton|>\nclass AssetSnapshotData:\n \"\"\"资产数据快照存储 每隔一个小时,从 strategy.asset 表中,创建一次快照数据 资产数据结构: {}\"\"\"\n\n def __init__(self):\n \"\"\"初始化\"\"\"\n <|body_0|>\n\n async def create_new_asset(self, platform, account, asset):\n \"\"\"创建新的资产信息 @param platform 交易平台 @param account 账户 @param asset 资产详情\"\"\"\n <|body_1|>\n\n async def get_asset_snapshot(self, platform, account, start=None, end=None):\n \"\"\"获取资产快照 @param platform 交易平台 @param account 账户 @param start 开始时间戳(秒) @param end 结束时间戳(秒)\"\"\"\n <|body_2|>\n\n async def get_latest_asset_snapshot(self, platform, account):\n \"\"\"查询最新的资产快照 @param platform 交易平台 @param account 账户\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self._db = 'strategy'\n self._collection = 'asset_snapshot'\n super(AssetSnapshotData, self).__init__(self._db, self._collection)\n<|end_body_0|>\n\n<|body_start_1|>\n d = {'platform': platform, 'account': account}\n for key, value in asset.items():\n d[key] = value\n asset_id = await self.insert(d)\n return asset_id\n<|end_body_1|>\n\n<|body_start_2|>\n if not end:\n end = tools.get_cur_timestamp()\n if not start:\n start = end - 60 * 60 * 24\n spec = {'platform': platform, 'account': account, 'create_time': {'$gte': start, '$lte': end}}\n fields = {'platform': 0, 'account': 0, 'update_time': 0}\n datas = await self.get_list(spec, fields=fields)\n return datas\n<|end_body_2|>\n\n<|body_start_3|>\n spec = {'platform': platform, 'account': account}\n _sort = [('update_time', -1)]\n asset = await self.find_one(spec, sort=_sort)\n if asset:\n del asset['_id']\n return asset\n<|end_body_3|>\n", "revision_id": "b0b9d60439a916bc4b1980f908f648aa863d5918", "skeleton": "<|skeleton|>\nclass AssetSnapshotData:\n \"\"\"资产数据快照存储 每隔一个小时,从 strategy.asset 表中,创建一次快照数据 资产数据结构: {}\"\"\"\n\n def __init__(self):\n \"\"\"初始化\"\"\"\n <|body_0|>\n\n async def create_new_asset(self, platform, account, asset):\n \"\"\"创建新的资产信息 @param platform 交易平台 @param account 账户 @param asset 资产详情\"\"\"\n <|body_1|>\n\n async def get_asset_snapshot(self, platform, account, start=None, end=None):\n \"\"\"获取资产快照 @param platform 交易平台 @param account 账户 @param start 开始时间戳(秒) @param end 结束时间戳(秒)\"\"\"\n <|body_2|>\n\n async def get_latest_asset_snapshot(self, platform, account):\n \"\"\"查询最新的资产快照 @param platform 交易平台 @param account 账户\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AssetSnapshotData:\n \"\"\"资产数据快照存储 每隔一个小时,从 strategy.asset 表中,创建一次快照数据 资产数据结构: {}\"\"\"\n\n def __init__(self):\n \"\"\"初始化\"\"\"\n self._db = 'strategy'\n self._collection = 'asset_snapshot'\n super(AssetSnapshotData, self).__init__(self._db, self._collection)\n\n async def create_new_asset(self, platform, account, asset):\n \"\"\"创建新的资产信息 @param platform 交易平台 @param account 账户 @param asset 资产详情\"\"\"\n d = {'platform': platform, 'account': account}\n for key, value in asset.items():\n d[key] = value\n asset_id = await self.insert(d)\n return asset_id\n\n async def get_asset_snapshot(self, platform, account, start=None, end=None):\n \"\"\"获取资产快照 @param platform 交易平台 @param account 账户 @param start 开始时间戳(秒) @param end 结束时间戳(秒)\"\"\"\n if not end:\n end = tools.get_cur_timestamp()\n if not start:\n start = end - 60 * 60 * 24\n spec = {'platform': platform, 'account': account, 'create_time': {'$gte': start, '$lte': end}}\n fields = {'platform': 0, 'account': 0, 'update_time': 0}\n datas = await self.get_list(spec, fields=fields)\n return datas\n\n async def get_latest_asset_snapshot(self, platform, account):\n \"\"\"查询最新的资产快照 @param platform 交易平台 @param account 账户\"\"\"\n spec = {'platform': platform, 'account': account}\n _sort = [('update_time', -1)]\n asset = await self.find_one(spec, sort=_sort)\n if asset:\n del asset['_id']\n return asset\n", "source": "the_stack_v2_python_sparse", "source_path": "quant/data.py", "source_repo": "51bitquant/thenextquant", "split": "test", "star_events_count": 6} {"blob_id": "419d9f7cb5c30e41b4cbfa30c4ae53fe38225ed3", "bodies": ["if nums[minIndex] == target:\n return minIndex\nif nums[maxIndex] == target:\n return maxIndex\nif maxIndex == minIndex:\n return 0 if target == nums[minIndex] else -1\nmedian = (minIndex + maxIndex) / 2\nif nums[median] == target:\n return median\nif nums[median] > nums[minIndex] and nums[median] > nums[maxIndex]:\n if target > nums[median] or target < nums[minIndex]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\nelif nums[median] < nums[minIndex] and nums[median] < nums[maxIndex]:\n if target < nums[median] or target > nums[maxIndex]:\n return self.findTarget(nums, minIndex, median, target)\n else:\n return self.findTarget(nums, median + 1, maxIndex, target)\nelif target < nums[minIndex] or target > nums[maxIndex]:\n return -1\nelif target > nums[median]:\n return self.findTarget(nums, median + 1, maxIndex, target)\nelse:\n return self.findTarget(nums, minIndex, median, target)", "numSize = len(nums)\nif numSize == 0:\n return -1\nminIndex = 0\nmaxIndex = numSize - 1\nreturn self.findTarget(nums, minIndex, maxIndex, target)"], "bodies_text": "<|body_start_0|>\n if nums[minIndex] == target:\n return minIndex\n if nums[maxIndex] == target:\n return maxIndex\n if maxIndex == minIndex:\n return 0 if target == nums[minIndex] else -1\n median = (minIndex + maxIndex) / 2\n if nums[median] == target:\n return median\n if nums[median] > nums[minIndex] and nums[median] > nums[maxIndex]:\n if target > nums[median] or target < nums[minIndex]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n elif nums[median] < nums[minIndex] and nums[median] < nums[maxIndex]:\n if target < nums[median] or target > nums[maxIndex]:\n return self.findTarget(nums, minIndex, median, target)\n else:\n return self.findTarget(nums, median + 1, maxIndex, target)\n elif target < nums[minIndex] or target > nums[maxIndex]:\n return -1\n elif target > nums[median]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n<|end_body_0|>\n\n<|body_start_1|>\n numSize = len(nums)\n if numSize == 0:\n return -1\n minIndex = 0\n maxIndex = numSize - 1\n return self.findTarget(nums, minIndex, maxIndex, target)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def findTarget(self, nums, minIndex, maxIndex, target):\n \"\"\":type nums: List[index] :type minIndex: int :type maxIndex: int :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums[minIndex] == target:\n return minIndex\n if nums[maxIndex] == target:\n return maxIndex\n if maxIndex == minIndex:\n return 0 if target == nums[minIndex] else -1\n median = (minIndex + maxIndex) / 2\n if nums[median] == target:\n return median\n if nums[median] > nums[minIndex] and nums[median] > nums[maxIndex]:\n if target > nums[median] or target < nums[minIndex]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n elif nums[median] < nums[minIndex] and nums[median] < nums[maxIndex]:\n if target < nums[median] or target > nums[maxIndex]:\n return self.findTarget(nums, minIndex, median, target)\n else:\n return self.findTarget(nums, median + 1, maxIndex, target)\n elif target < nums[minIndex] or target > nums[maxIndex]:\n return -1\n elif target > nums[median]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n<|end_body_0|>\n\n<|body_start_1|>\n numSize = len(nums)\n if numSize == 0:\n return -1\n minIndex = 0\n maxIndex = numSize - 1\n return self.findTarget(nums, minIndex, maxIndex, target)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000415", "length_bytes": 2019, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[index] :type minIndex: int :type maxIndex: int :type target: int :rtype: int", "name": "findTarget", "signature": "def findTarget(self, nums, minIndex, maxIndex, target)"}, {"docstring": ":type nums: List[int] :type target: int :rtype: int", "name": "search", "signature": "def search(self, nums, target)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_027653", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findTarget(self, nums, minIndex, maxIndex, target): :type nums: List[index] :type minIndex: int :type maxIndex: int :type target: int :rtype: int\n- def search(self, nums, target): :type nums: List[int] :type target: int :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def findTarget(self, nums, minIndex, maxIndex, target): :type nums: List[index] :type minIndex: int :type maxIndex: int :type target: int :rtype: int\n- def search(self, nums, target): :type nums: List[int] :type target: int :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def findTarget(self, nums, minIndex, maxIndex, target):\n \"\"\":type nums: List[index] :type minIndex: int :type maxIndex: int :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums[minIndex] == target:\n return minIndex\n if nums[maxIndex] == target:\n return maxIndex\n if maxIndex == minIndex:\n return 0 if target == nums[minIndex] else -1\n median = (minIndex + maxIndex) / 2\n if nums[median] == target:\n return median\n if nums[median] > nums[minIndex] and nums[median] > nums[maxIndex]:\n if target > nums[median] or target < nums[minIndex]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n elif nums[median] < nums[minIndex] and nums[median] < nums[maxIndex]:\n if target < nums[median] or target > nums[maxIndex]:\n return self.findTarget(nums, minIndex, median, target)\n else:\n return self.findTarget(nums, median + 1, maxIndex, target)\n elif target < nums[minIndex] or target > nums[maxIndex]:\n return -1\n elif target > nums[median]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n<|end_body_0|>\n\n<|body_start_1|>\n numSize = len(nums)\n if numSize == 0:\n return -1\n minIndex = 0\n maxIndex = numSize - 1\n return self.findTarget(nums, minIndex, maxIndex, target)\n<|end_body_1|>\n", "revision_id": "1dd5192e8beae28d7c08346133e3f47797c42b5a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def findTarget(self, nums, minIndex, maxIndex, target):\n \"\"\":type nums: List[index] :type minIndex: int :type maxIndex: int :type target: int :rtype: int\"\"\"\n <|body_0|>\n\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def findTarget(self, nums, minIndex, maxIndex, target):\n \"\"\":type nums: List[index] :type minIndex: int :type maxIndex: int :type target: int :rtype: int\"\"\"\n if nums[minIndex] == target:\n return minIndex\n if nums[maxIndex] == target:\n return maxIndex\n if maxIndex == minIndex:\n return 0 if target == nums[minIndex] else -1\n median = (minIndex + maxIndex) / 2\n if nums[median] == target:\n return median\n if nums[median] > nums[minIndex] and nums[median] > nums[maxIndex]:\n if target > nums[median] or target < nums[minIndex]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n elif nums[median] < nums[minIndex] and nums[median] < nums[maxIndex]:\n if target < nums[median] or target > nums[maxIndex]:\n return self.findTarget(nums, minIndex, median, target)\n else:\n return self.findTarget(nums, median + 1, maxIndex, target)\n elif target < nums[minIndex] or target > nums[maxIndex]:\n return -1\n elif target > nums[median]:\n return self.findTarget(nums, median + 1, maxIndex, target)\n else:\n return self.findTarget(nums, minIndex, median, target)\n\n def search(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: int\"\"\"\n numSize = len(nums)\n if numSize == 0:\n return -1\n minIndex = 0\n maxIndex = numSize - 1\n return self.findTarget(nums, minIndex, maxIndex, target)\n", "source": "the_stack_v2_python_sparse", "source_path": "python/leet_033.py", "source_repo": "wolaoa/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "5f704b799b54c9f3aa516fc53ff4ea73794344fc", "bodies": ["if model._meta.app_label == 'trees':\n return 'djangofirst'\nreturn None", "if model._meta.app_label == 'trees':\n return 'djangofirst'\nreturn None", "print(db, app_label, model_name, hints)\nif app_label == 'trees':\n return db == 'djangofirst'\nreturn None"], "bodies_text": "<|body_start_0|>\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n print(db, app_label, model_name, hints)\n if app_label == 'trees':\n return db == 'djangofirst'\n return None\n<|end_body_2|>\n", "class_docstring": "", "class_name": "DBRouter", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DBRouter:\n\n def db_for_read(self, model, **hints):\n \"\"\"All cache read operations go to the replica\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"All cache write operations go to primary\"\"\"\n <|body_1|>\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Only install the cache model on primary\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n print(db, app_label, model_name, hints)\n if app_label == 'trees':\n return db == 'djangofirst'\n return None\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000416", "length_bytes": 737, "license_type": "no_license", "methods": [{"docstring": "All cache read operations go to the replica", "name": "db_for_read", "signature": "def db_for_read(self, model, **hints)"}, {"docstring": "All cache write operations go to primary", "name": "db_for_write", "signature": "def db_for_write(self, model, **hints)"}, {"docstring": "Only install the cache model on primary", "name": "allow_migrate", "signature": "def allow_migrate(self, db, app_label, model_name=None, **hints)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_008675", "prompt": "Implement the Python class `DBRouter` described below.\n\nClass description:\nImplement the DBRouter class.\n\nMethod signatures and docstrings:\n- def db_for_read(self, model, **hints): All cache read operations go to the replica\n- def db_for_write(self, model, **hints): All cache write operations go to primary\n- def allow_migrate(self, db, app_label, model_name=None, **hints): Only install the cache model on primary", "prompted_full_text": "Implement the Python class `DBRouter` described below.\n\nClass description:\nImplement the DBRouter class.\n\nMethod signatures and docstrings:\n- def db_for_read(self, model, **hints): All cache read operations go to the replica\n- def db_for_write(self, model, **hints): All cache write operations go to primary\n- def allow_migrate(self, db, app_label, model_name=None, **hints): Only install the cache model on primary\n\n<|skeleton|>\nclass DBRouter:\n\n def db_for_read(self, model, **hints):\n \"\"\"All cache read operations go to the replica\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"All cache write operations go to primary\"\"\"\n <|body_1|>\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Only install the cache model on primary\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n<|end_body_1|>\n\n<|body_start_2|>\n print(db, app_label, model_name, hints)\n if app_label == 'trees':\n return db == 'djangofirst'\n return None\n<|end_body_2|>\n", "revision_id": "b2aecb026895cc39ed8203403b7501bbdde1cde2", "skeleton": "<|skeleton|>\nclass DBRouter:\n\n def db_for_read(self, model, **hints):\n \"\"\"All cache read operations go to the replica\"\"\"\n <|body_0|>\n\n def db_for_write(self, model, **hints):\n \"\"\"All cache write operations go to primary\"\"\"\n <|body_1|>\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Only install the cache model on primary\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DBRouter:\n def db_for_read(self, model, **hints):\n \"\"\"All cache read operations go to the replica\"\"\"\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n\n def db_for_write(self, model, **hints):\n \"\"\"All cache write operations go to primary\"\"\"\n if model._meta.app_label == 'trees':\n return 'djangofirst'\n return None\n\n def allow_migrate(self, db, app_label, model_name=None, **hints):\n \"\"\"Only install the cache model on primary\"\"\"\n print(db, app_label, model_name, hints)\n if app_label == 'trees':\n return db == 'djangofirst'\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "django_sites/src/django_sites/db_routers.py", "source_repo": "rensg001/test_scripts", "split": "test", "star_events_count": 0} {"blob_id": "4683561b5c7b54e19ffe300d2e40196b4d154ce0", "bodies": ["logger.debug(__('Performing health check with message {}.', message))\npath = Path(message['file'])\nif await database_sync_to_async(self.check_database, thread_sensitive=False)():\n logger.debug('Health check passed.')\n path.touch(exist_ok=True)", "with connection.cursor() as cursor:\n cursor.execute('SELECT 1;')\n result = cursor.fetchone()[0]\n return result == 1"], "bodies_text": "<|body_start_0|>\n logger.debug(__('Performing health check with message {}.', message))\n path = Path(message['file'])\n if await database_sync_to_async(self.check_database, thread_sensitive=False)():\n logger.debug('Health check passed.')\n path.touch(exist_ok=True)\n<|end_body_0|>\n\n<|body_start_1|>\n with connection.cursor() as cursor:\n cursor.execute('SELECT 1;')\n result = cursor.fetchone()[0]\n return result == 1\n<|end_body_1|>\n", "class_docstring": "Channels consumer for handling health-check events.", "class_name": "HealtCheckConsumer", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass HealtCheckConsumer:\n \"\"\"Channels consumer for handling health-check events.\"\"\"\n\n async def health_check(self, message: dict):\n \"\"\"Perform health check. We are testing the channels layer and database layer. The channels layer is already functioning if this method is called so we have to perform database check. If the check is successfull touch the file specified in the channels message.\"\"\"\n <|body_0|>\n\n def check_database(self) -> bool:\n \"\"\"Perform a simple database check.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug(__('Performing health check with message {}.', message))\n path = Path(message['file'])\n if await database_sync_to_async(self.check_database, thread_sensitive=False)():\n logger.debug('Health check passed.')\n path.touch(exist_ok=True)\n<|end_body_0|>\n\n<|body_start_1|>\n with connection.cursor() as cursor:\n cursor.execute('SELECT 1;')\n result = cursor.fetchone()[0]\n return result == 1\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000417", "length_bytes": 3964, "license_type": "permissive", "methods": [{"docstring": "Perform health check. We are testing the channels layer and database layer. The channels layer is already functioning if this method is called so we have to perform database check. If the check is successfull touch the file specified in the channels message.", "name": "health_check", "signature": "async def health_check(self, message: dict)"}, {"docstring": "Perform a simple database check.", "name": "check_database", "signature": "def check_database(self) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035093", "prompt": "Implement the Python class `HealtCheckConsumer` described below.\n\nClass description:\nChannels consumer for handling health-check events.\n\nMethod signatures and docstrings:\n- async def health_check(self, message: dict): Perform health check. We are testing the channels layer and database layer. The channels layer is already functioning if this method is called so we have to perform database check. If the check is successfull touch the file specified in the channels message.\n- def check_database(self) -> bool: Perform a simple database check.", "prompted_full_text": "Implement the Python class `HealtCheckConsumer` described below.\n\nClass description:\nChannels consumer for handling health-check events.\n\nMethod signatures and docstrings:\n- async def health_check(self, message: dict): Perform health check. We are testing the channels layer and database layer. The channels layer is already functioning if this method is called so we have to perform database check. If the check is successfull touch the file specified in the channels message.\n- def check_database(self) -> bool: Perform a simple database check.\n\n<|skeleton|>\nclass HealtCheckConsumer:\n \"\"\"Channels consumer for handling health-check events.\"\"\"\n\n async def health_check(self, message: dict):\n \"\"\"Perform health check. We are testing the channels layer and database layer. The channels layer is already functioning if this method is called so we have to perform database check. If the check is successfull touch the file specified in the channels message.\"\"\"\n <|body_0|>\n\n def check_database(self) -> bool:\n \"\"\"Perform a simple database check.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logger.debug(__('Performing health check with message {}.', message))\n path = Path(message['file'])\n if await database_sync_to_async(self.check_database, thread_sensitive=False)():\n logger.debug('Health check passed.')\n path.touch(exist_ok=True)\n<|end_body_0|>\n\n<|body_start_1|>\n with connection.cursor() as cursor:\n cursor.execute('SELECT 1;')\n result = cursor.fetchone()[0]\n return result == 1\n<|end_body_1|>\n", "revision_id": "25c0c45235ef37beb45c1af4c917fbbae6282016", "skeleton": "<|skeleton|>\nclass HealtCheckConsumer:\n \"\"\"Channels consumer for handling health-check events.\"\"\"\n\n async def health_check(self, message: dict):\n \"\"\"Perform health check. We are testing the channels layer and database layer. The channels layer is already functioning if this method is called so we have to perform database check. If the check is successfull touch the file specified in the channels message.\"\"\"\n <|body_0|>\n\n def check_database(self) -> bool:\n \"\"\"Perform a simple database check.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class HealtCheckConsumer:\n \"\"\"Channels consumer for handling health-check events.\"\"\"\n\n async def health_check(self, message: dict):\n \"\"\"Perform health check. We are testing the channels layer and database layer. The channels layer is already functioning if this method is called so we have to perform database check. If the check is successfull touch the file specified in the channels message.\"\"\"\n logger.debug(__('Performing health check with message {}.', message))\n path = Path(message['file'])\n if await database_sync_to_async(self.check_database, thread_sensitive=False)():\n logger.debug('Health check passed.')\n path.touch(exist_ok=True)\n\n def check_database(self) -> bool:\n \"\"\"Perform a simple database check.\"\"\"\n with connection.cursor() as cursor:\n cursor.execute('SELECT 1;')\n result = cursor.fetchone()[0]\n return result == 1\n", "source": "the_stack_v2_python_sparse", "source_path": "resolwe/flow/managers/consumer.py", "source_repo": "genialis/resolwe", "split": "test", "star_events_count": 35} {"blob_id": "fff7b890da23348a6c8b6afa9e22bb9afec32872", "bodies": ["article = ArticleInst.fetch(slug)\ncomment = request.data.get('comment', {})\nposted_comment = CommentAPIView.check_comment(id, article)\nserializer = self.serializer_class(data=comment)\nserializer.is_valid(raise_exception=True)\nstatus_ = status.HTTP_201_CREATED\ntry:\n CommentReply.objects.get(comment_to=posted_comment, author=request.user, body=comment.get('body').strip())\nexcept CommentReply.DoesNotExist:\n serializer.save(author=request.user, article=article, comment_to=posted_comment)\n resp = {'message': f'Replied to comment of ID {id}'}\n resp['data'] = serializer.data\nelse:\n resp = {'message': \"Seems you've posted an exact comment before\"}\n status_ = status.HTTP_409_CONFLICT\nreturn Response(data=resp, status=status_)", "article = ArticleInst.fetch(slug)\nposted_comment = CommentAPIView.check_comment(article=article, key=id)\ncomments = posted_comment.replies\ncomments = self.serializer_class(comments, many=True).data\nresponse = {'comment': posted_comment.body}\nresponse['replies'] = comments\nresponse.update({'replies count': len(comments)})\nreturn Response(data=response, status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n article = ArticleInst.fetch(slug)\n comment = request.data.get('comment', {})\n posted_comment = CommentAPIView.check_comment(id, article)\n serializer = self.serializer_class(data=comment)\n serializer.is_valid(raise_exception=True)\n status_ = status.HTTP_201_CREATED\n try:\n CommentReply.objects.get(comment_to=posted_comment, author=request.user, body=comment.get('body').strip())\n except CommentReply.DoesNotExist:\n serializer.save(author=request.user, article=article, comment_to=posted_comment)\n resp = {'message': f'Replied to comment of ID {id}'}\n resp['data'] = serializer.data\n else:\n resp = {'message': \"Seems you've posted an exact comment before\"}\n status_ = status.HTTP_409_CONFLICT\n return Response(data=resp, status=status_)\n<|end_body_0|>\n\n<|body_start_1|>\n article = ArticleInst.fetch(slug)\n posted_comment = CommentAPIView.check_comment(article=article, key=id)\n comments = posted_comment.replies\n comments = self.serializer_class(comments, many=True).data\n response = {'comment': posted_comment.body}\n response['replies'] = comments\n response.update({'replies count': len(comments)})\n return Response(data=response, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "class_docstring": "Handles viweing of replies made to a comment and replying to an article comment", "class_name": "ReplyList", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ReplyList:\n \"\"\"Handles viweing of replies made to a comment and replying to an article comment\"\"\"\n\n def post(self, request, slug, id):\n \"\"\"Posts a reply to a comment\"\"\"\n <|body_0|>\n\n def get(self, request, slug, id):\n \"\"\"Retrieves all replies to a comment of matching ID\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n article = ArticleInst.fetch(slug)\n comment = request.data.get('comment', {})\n posted_comment = CommentAPIView.check_comment(id, article)\n serializer = self.serializer_class(data=comment)\n serializer.is_valid(raise_exception=True)\n status_ = status.HTTP_201_CREATED\n try:\n CommentReply.objects.get(comment_to=posted_comment, author=request.user, body=comment.get('body').strip())\n except CommentReply.DoesNotExist:\n serializer.save(author=request.user, article=article, comment_to=posted_comment)\n resp = {'message': f'Replied to comment of ID {id}'}\n resp['data'] = serializer.data\n else:\n resp = {'message': \"Seems you've posted an exact comment before\"}\n status_ = status.HTTP_409_CONFLICT\n return Response(data=resp, status=status_)\n<|end_body_0|>\n\n<|body_start_1|>\n article = ArticleInst.fetch(slug)\n posted_comment = CommentAPIView.check_comment(article=article, key=id)\n comments = posted_comment.replies\n comments = self.serializer_class(comments, many=True).data\n response = {'comment': posted_comment.body}\n response['replies'] = comments\n response.update({'replies count': len(comments)})\n return Response(data=response, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000418", "length_bytes": 10918, "license_type": "permissive", "methods": [{"docstring": "Posts a reply to a comment", "name": "post", "signature": "def post(self, request, slug, id)"}, {"docstring": "Retrieves all replies to a comment of matching ID", "name": "get", "signature": "def get(self, request, slug, id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_052581", "prompt": "Implement the Python class `ReplyList` described below.\n\nClass description:\nHandles viweing of replies made to a comment and replying to an article comment\n\nMethod signatures and docstrings:\n- def post(self, request, slug, id): Posts a reply to a comment\n- def get(self, request, slug, id): Retrieves all replies to a comment of matching ID", "prompted_full_text": "Implement the Python class `ReplyList` described below.\n\nClass description:\nHandles viweing of replies made to a comment and replying to an article comment\n\nMethod signatures and docstrings:\n- def post(self, request, slug, id): Posts a reply to a comment\n- def get(self, request, slug, id): Retrieves all replies to a comment of matching ID\n\n<|skeleton|>\nclass ReplyList:\n \"\"\"Handles viweing of replies made to a comment and replying to an article comment\"\"\"\n\n def post(self, request, slug, id):\n \"\"\"Posts a reply to a comment\"\"\"\n <|body_0|>\n\n def get(self, request, slug, id):\n \"\"\"Retrieves all replies to a comment of matching ID\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n article = ArticleInst.fetch(slug)\n comment = request.data.get('comment', {})\n posted_comment = CommentAPIView.check_comment(id, article)\n serializer = self.serializer_class(data=comment)\n serializer.is_valid(raise_exception=True)\n status_ = status.HTTP_201_CREATED\n try:\n CommentReply.objects.get(comment_to=posted_comment, author=request.user, body=comment.get('body').strip())\n except CommentReply.DoesNotExist:\n serializer.save(author=request.user, article=article, comment_to=posted_comment)\n resp = {'message': f'Replied to comment of ID {id}'}\n resp['data'] = serializer.data\n else:\n resp = {'message': \"Seems you've posted an exact comment before\"}\n status_ = status.HTTP_409_CONFLICT\n return Response(data=resp, status=status_)\n<|end_body_0|>\n\n<|body_start_1|>\n article = ArticleInst.fetch(slug)\n posted_comment = CommentAPIView.check_comment(article=article, key=id)\n comments = posted_comment.replies\n comments = self.serializer_class(comments, many=True).data\n response = {'comment': posted_comment.body}\n response['replies'] = comments\n response.update({'replies count': len(comments)})\n return Response(data=response, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "revision_id": "b80ad485339dbb02b74d9b2093543bf8173d51de", "skeleton": "<|skeleton|>\nclass ReplyList:\n \"\"\"Handles viweing of replies made to a comment and replying to an article comment\"\"\"\n\n def post(self, request, slug, id):\n \"\"\"Posts a reply to a comment\"\"\"\n <|body_0|>\n\n def get(self, request, slug, id):\n \"\"\"Retrieves all replies to a comment of matching ID\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ReplyList:\n \"\"\"Handles viweing of replies made to a comment and replying to an article comment\"\"\"\n\n def post(self, request, slug, id):\n \"\"\"Posts a reply to a comment\"\"\"\n article = ArticleInst.fetch(slug)\n comment = request.data.get('comment', {})\n posted_comment = CommentAPIView.check_comment(id, article)\n serializer = self.serializer_class(data=comment)\n serializer.is_valid(raise_exception=True)\n status_ = status.HTTP_201_CREATED\n try:\n CommentReply.objects.get(comment_to=posted_comment, author=request.user, body=comment.get('body').strip())\n except CommentReply.DoesNotExist:\n serializer.save(author=request.user, article=article, comment_to=posted_comment)\n resp = {'message': f'Replied to comment of ID {id}'}\n resp['data'] = serializer.data\n else:\n resp = {'message': \"Seems you've posted an exact comment before\"}\n status_ = status.HTTP_409_CONFLICT\n return Response(data=resp, status=status_)\n\n def get(self, request, slug, id):\n \"\"\"Retrieves all replies to a comment of matching ID\"\"\"\n article = ArticleInst.fetch(slug)\n posted_comment = CommentAPIView.check_comment(article=article, key=id)\n comments = posted_comment.replies\n comments = self.serializer_class(comments, many=True).data\n response = {'comment': posted_comment.body}\n response['replies'] = comments\n response.update({'replies count': len(comments)})\n return Response(data=response, status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "authors/apps/comments/views.py", "source_repo": "deferral/ah-django", "split": "test", "star_events_count": 1} {"blob_id": "3f4e613387c8c936554c09b5c9ef126ccd459886", "bodies": ["d = {}\nfor i, num in enumerate(nums):\n if num in d:\n return [d[num], i]\n d[target - num] = i", "for i, num in enumerate(nums):\n if target - num in nums[i + 1:]:\n return [i, len(nums) - 1 - nums[::-1].index(target - num)]"], "bodies_text": "<|body_start_0|>\n d = {}\n for i, num in enumerate(nums):\n if num in d:\n return [d[num], i]\n d[target - num] = i\n<|end_body_0|>\n\n<|body_start_1|>\n for i, num in enumerate(nums):\n if target - num in nums[i + 1:]:\n return [i, len(nums) - 1 - nums[::-1].index(target - num)]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"hash table\"\"\"\n <|body_0|>\n\n def twoSum2(self, nums, target):\n \"\"\"first try\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = {}\n for i, num in enumerate(nums):\n if num in d:\n return [d[num], i]\n d[target - num] = i\n<|end_body_0|>\n\n<|body_start_1|>\n for i, num in enumerate(nums):\n if target - num in nums[i + 1:]:\n return [i, len(nums) - 1 - nums[::-1].index(target - num)]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000419", "length_bytes": 887, "license_type": "permissive", "methods": [{"docstring": "hash table", "name": "twoSum", "signature": "def twoSum(self, nums, target)"}, {"docstring": "first try", "name": "twoSum2", "signature": "def twoSum2(self, nums, target)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_041347", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def twoSum(self, nums, target): hash table\n- def twoSum2(self, nums, target): first try", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def twoSum(self, nums, target): hash table\n- def twoSum2(self, nums, target): first try\n\n<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"hash table\"\"\"\n <|body_0|>\n\n def twoSum2(self, nums, target):\n \"\"\"first try\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = {}\n for i, num in enumerate(nums):\n if num in d:\n return [d[num], i]\n d[target - num] = i\n<|end_body_0|>\n\n<|body_start_1|>\n for i, num in enumerate(nums):\n if target - num in nums[i + 1:]:\n return [i, len(nums) - 1 - nums[::-1].index(target - num)]\n<|end_body_1|>\n", "revision_id": "49a0b03c55d8a702785888d473ef96539265ce9c", "skeleton": "<|skeleton|>\nclass Solution:\n\n def twoSum(self, nums, target):\n \"\"\"hash table\"\"\"\n <|body_0|>\n\n def twoSum2(self, nums, target):\n \"\"\"first try\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def twoSum(self, nums, target):\n \"\"\"hash table\"\"\"\n d = {}\n for i, num in enumerate(nums):\n if num in d:\n return [d[num], i]\n d[target - num] = i\n\n def twoSum2(self, nums, target):\n \"\"\"first try\"\"\"\n for i, num in enumerate(nums):\n if target - num in nums[i + 1:]:\n return [i, len(nums) - 1 - nums[::-1].index(target - num)]\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/0001_two_sum.py", "source_repo": "chaosWsF/Python-Practice", "split": "test", "star_events_count": 1} {"blob_id": "4b42b73fdb1a36f31b0f36dcd082f9c769b92a5c", "bodies": ["self.size = size\nself.current_size = 0\nself.values = collections.deque()", "if self.current_size < self.size:\n self.values.append(val)\n self.current_size += 1\n return 1.0 * sum(self.values) / len(self.values)\nelse:\n self.values.append(val)\n self.values.popleft()\n return 1.0 * sum(self.values) / len(self.values)"], "bodies_text": "<|body_start_0|>\n self.size = size\n self.current_size = 0\n self.values = collections.deque()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.current_size < self.size:\n self.values.append(val)\n self.current_size += 1\n return 1.0 * sum(self.values) / len(self.values)\n else:\n self.values.append(val)\n self.values.popleft()\n return 1.0 * sum(self.values) / len(self.values)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "MovingAverage", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.size = size\n self.current_size = 0\n self.values = collections.deque()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.current_size < self.size:\n self.values.append(val)\n self.current_size += 1\n return 1.0 * sum(self.values) / len(self.values)\n else:\n self.values.append(val)\n self.values.popleft()\n return 1.0 * sum(self.values) / len(self.values)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000420", "length_bytes": 1304, "license_type": "no_license", "methods": [{"docstring": "Initialize your data structure here. :type size: int", "name": "__init__", "signature": "def __init__(self, size)"}, {"docstring": ":type val: int :rtype: float", "name": "next", "signature": "def next(self, val)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_004207", "prompt": "Implement the Python class `MovingAverage` described below.\n\nClass description:\nImplement the MovingAverage class.\n\nMethod signatures and docstrings:\n- def __init__(self, size): Initialize your data structure here. :type size: int\n- def next(self, val): :type val: int :rtype: float", "prompted_full_text": "Implement the Python class `MovingAverage` described below.\n\nClass description:\nImplement the MovingAverage class.\n\nMethod signatures and docstrings:\n- def __init__(self, size): Initialize your data structure here. :type size: int\n- def next(self, val): :type val: int :rtype: float\n\n<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.size = size\n self.current_size = 0\n self.values = collections.deque()\n<|end_body_0|>\n\n<|body_start_1|>\n if self.current_size < self.size:\n self.values.append(val)\n self.current_size += 1\n return 1.0 * sum(self.values) / len(self.values)\n else:\n self.values.append(val)\n self.values.popleft()\n return 1.0 * sum(self.values) / len(self.values)\n<|end_body_1|>\n", "revision_id": "6de551327f96ec4d4b63d0045281b65bbb4f5d0f", "skeleton": "<|skeleton|>\nclass MovingAverage:\n\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n <|body_0|>\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MovingAverage:\n def __init__(self, size):\n \"\"\"Initialize your data structure here. :type size: int\"\"\"\n self.size = size\n self.current_size = 0\n self.values = collections.deque()\n\n def next(self, val):\n \"\"\":type val: int :rtype: float\"\"\"\n if self.current_size < self.size:\n self.values.append(val)\n self.current_size += 1\n return 1.0 * sum(self.values) / len(self.values)\n else:\n self.values.append(val)\n self.values.popleft()\n return 1.0 * sum(self.values) / len(self.values)\n", "source": "the_stack_v2_python_sparse", "source_path": "MovingAverage.py", "source_repo": "JingweiTu/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "4c57b45e7b5ad3d8833dbba87d8bc707e7d20d29", "bodies": ["c_multiplier, c_ratio = self._get_contrast_multiplier(web_access, el, edge_metrics)\nedge_metrics.contrast_ratio = c_ratio\nif c_multiplier == 0:\n return 0.0\nsize_multiplier, size = self._get_size_multiplier(web_access, el, edge_metrics)\nedge_metrics.size = size\nreturn size_multiplier * c_multiplier", "contrast_ratio = edge_metrics.build_data.contrast_ratio(web_access, None, el)\nfont_size = edge_metrics.build_data.font_size(web_access, None, el)\nif font_size < 18:\n if contrast_ratio >= reg_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\nelif contrast_ratio >= large_text_cutoff:\n return (1.0, contrast_ratio)\nelse:\n return (0.0, contrast_ratio)", "exception_tags = ['li', 'ul']\nheight = edge_metrics.build_data.height(web_access, None, el)\nwidth = edge_metrics.build_data.width(web_access, None, el)\ntag_name = edge_metrics.build_data.tag_name(web_access, None, el)\nif tag_name in exception_tags or (height >= min_pixels and width >= min_pixels):\n return (1.0, (width, height))\nelse:\n return (0.0, (width, height))", "tags = set()\ntags.add(web_access.get_selenium_element(el).get_attribute('innerText'))\ntags.add(web_access.get_selenium_element(el).get_attribute('value'))\ntags.add(web_access.get_selenium_element(el).get_attribute('type'))\nfield_id = web_access.get_selenium_element(el).get_attribute('id')\nif field_id is not None:\n labels = web_access.query_xpath('//label[@for=\"%s\"]' % field_id)\n if labels is not None:\n for label in labels:\n if self.score_perceive(web_access, label, web_access._create_edge_metrics()) > 0.0:\n t = web_access.get_selenium_element(label).get_attribute('innerText')\n if t is not None:\n tags.add(t)\nreturn tags"], "bodies_text": "<|body_start_0|>\n c_multiplier, c_ratio = self._get_contrast_multiplier(web_access, el, edge_metrics)\n edge_metrics.contrast_ratio = c_ratio\n if c_multiplier == 0:\n return 0.0\n size_multiplier, size = self._get_size_multiplier(web_access, el, edge_metrics)\n edge_metrics.size = size\n return size_multiplier * c_multiplier\n<|end_body_0|>\n\n<|body_start_1|>\n contrast_ratio = edge_metrics.build_data.contrast_ratio(web_access, None, el)\n font_size = edge_metrics.build_data.font_size(web_access, None, el)\n if font_size < 18:\n if contrast_ratio >= reg_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n elif contrast_ratio >= large_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n<|end_body_1|>\n\n<|body_start_2|>\n exception_tags = ['li', 'ul']\n height = edge_metrics.build_data.height(web_access, None, el)\n width = edge_metrics.build_data.width(web_access, None, el)\n tag_name = edge_metrics.build_data.tag_name(web_access, None, el)\n if tag_name in exception_tags or (height >= min_pixels and width >= min_pixels):\n return (1.0, (width, height))\n else:\n return (0.0, (width, height))\n<|end_body_2|>\n\n<|body_start_3|>\n tags = set()\n tags.add(web_access.get_selenium_element(el).get_attribute('innerText'))\n tags.add(web_access.get_selenium_element(el).get_attribute('value'))\n tags.add(web_access.get_selenium_element(el).get_attribute('type'))\n field_id = web_access.get_selenium_element(el).get_attribute('id')\n if field_id is not None:\n labels = web_access.query_xpath('//label[@for=\"%s\"]' % field_id)\n if labels is not None:\n for label in labels:\n if self.score_perceive(web_access, label, web_access._create_edge_metrics()) > 0.0:\n t = web_access.get_selenium_element(label).get_attribute('innerText')\n if t is not None:\n tags.add(t)\n return tags\n<|end_body_3|>\n", "class_docstring": "This class represents a user with normal vision. It can serve as a base class for other vision-related components.", "class_name": "VisionAbility", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass VisionAbility:\n \"\"\"This class represents a user with normal vision. It can serve as a base class for other vision-related components.\"\"\"\n\n def score_perceive(self, web_access, el, edge_metrics):\n \"\"\"Evaluate the user's ability to perceive el, based on contrast & size Args: web_access: Web interface to interact with the browser el: A particular element on this interface. edge_metrics: EdgeMetrics object that stores data for a user/edge. Returns: contrast_multiplier: perceive_score float in [0, 1]\"\"\"\n <|body_0|>\n\n def _get_contrast_multiplier(web_access, el, edge_metrics, reg_text_cutoff=4.5, large_text_cutoff=3.0):\n \"\"\"Returns the contrast_multiplier for a given element on a page. Uses AA WCAG 2 standards: https://webaim.org/articles/contrast/ Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to reg_text_cutoff: min contrast ratio for regular-sized text large_text_cutoff min contrast ratio for large text Returns: contrast_multiplier: 0.0 or 1.0, representing complaince with WCAG contrast_ratio: contrast ratio used by WCAG\"\"\"\n <|body_1|>\n\n def _get_size_multiplier(web_access, el, edge_metrics, min_pixels=44):\n \"\"\"Returns the size_multiplier for a given element on a page. Uses AAA WCAG 2: https://www.w3.org/WAI/WCAG21/quickref/#target-size Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to min_pixels: Number of pixels to an element must be in each dimension Returns: size_multiplier: 0.0 or 1.0, representing complaince with WCAG size_xy: tuple (x, y), representing the width, height of the el\"\"\"\n <|body_2|>\n\n def describe(self, web_access, el):\n \"\"\"Describes an element based on its innerText, value, type, id, and any labels if they are perceivable. Args: web_access: Web access to the user interface for retrieving actionable elements on a web page. el: WebAccess::Element representing the element to find dist to Returns: A list containing the innerText, value, type, id, and any perceivable labels of an element.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n c_multiplier, c_ratio = self._get_contrast_multiplier(web_access, el, edge_metrics)\n edge_metrics.contrast_ratio = c_ratio\n if c_multiplier == 0:\n return 0.0\n size_multiplier, size = self._get_size_multiplier(web_access, el, edge_metrics)\n edge_metrics.size = size\n return size_multiplier * c_multiplier\n<|end_body_0|>\n\n<|body_start_1|>\n contrast_ratio = edge_metrics.build_data.contrast_ratio(web_access, None, el)\n font_size = edge_metrics.build_data.font_size(web_access, None, el)\n if font_size < 18:\n if contrast_ratio >= reg_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n elif contrast_ratio >= large_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n<|end_body_1|>\n\n<|body_start_2|>\n exception_tags = ['li', 'ul']\n height = edge_metrics.build_data.height(web_access, None, el)\n width = edge_metrics.build_data.width(web_access, None, el)\n tag_name = edge_metrics.build_data.tag_name(web_access, None, el)\n if tag_name in exception_tags or (height >= min_pixels and width >= min_pixels):\n return (1.0, (width, height))\n else:\n return (0.0, (width, height))\n<|end_body_2|>\n\n<|body_start_3|>\n tags = set()\n tags.add(web_access.get_selenium_element(el).get_attribute('innerText'))\n tags.add(web_access.get_selenium_element(el).get_attribute('value'))\n tags.add(web_access.get_selenium_element(el).get_attribute('type'))\n field_id = web_access.get_selenium_element(el).get_attribute('id')\n if field_id is not None:\n labels = web_access.query_xpath('//label[@for=\"%s\"]' % field_id)\n if labels is not None:\n for label in labels:\n if self.score_perceive(web_access, label, web_access._create_edge_metrics()) > 0.0:\n t = web_access.get_selenium_element(label).get_attribute('innerText')\n if t is not None:\n tags.add(t)\n return tags\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000421", "length_bytes": 14913, "license_type": "permissive", "methods": [{"docstring": "Evaluate the user's ability to perceive el, based on contrast & size Args: web_access: Web interface to interact with the browser el: A particular element on this interface. edge_metrics: EdgeMetrics object that stores data for a user/edge. Returns: contrast_multiplier: perceive_score float in [0, 1]", "name": "score_perceive", "signature": "def score_perceive(self, web_access, el, edge_metrics)"}, {"docstring": "Returns the contrast_multiplier for a given element on a page. Uses AA WCAG 2 standards: https://webaim.org/articles/contrast/ Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to reg_text_cutoff: min contrast ratio for regular-sized text large_text_cutoff min contrast ratio for large text Returns: contrast_multiplier: 0.0 or 1.0, representing complaince with WCAG contrast_ratio: contrast ratio used by WCAG", "name": "_get_contrast_multiplier", "signature": "def _get_contrast_multiplier(web_access, el, edge_metrics, reg_text_cutoff=4.5, large_text_cutoff=3.0)"}, {"docstring": "Returns the size_multiplier for a given element on a page. Uses AAA WCAG 2: https://www.w3.org/WAI/WCAG21/quickref/#target-size Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to min_pixels: Number of pixels to an element must be in each dimension Returns: size_multiplier: 0.0 or 1.0, representing complaince with WCAG size_xy: tuple (x, y), representing the width, height of the el", "name": "_get_size_multiplier", "signature": "def _get_size_multiplier(web_access, el, edge_metrics, min_pixels=44)"}, {"docstring": "Describes an element based on its innerText, value, type, id, and any labels if they are perceivable. Args: web_access: Web access to the user interface for retrieving actionable elements on a web page. el: WebAccess::Element representing the element to find dist to Returns: A list containing the innerText, value, type, id, and any perceivable labels of an element.", "name": "describe", "signature": "def describe(self, web_access, el)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_030021", "prompt": "Implement the Python class `VisionAbility` described below.\n\nClass description:\nThis class represents a user with normal vision. It can serve as a base class for other vision-related components.\n\nMethod signatures and docstrings:\n- def score_perceive(self, web_access, el, edge_metrics): Evaluate the user's ability to perceive el, based on contrast & size Args: web_access: Web interface to interact with the browser el: A particular element on this interface. edge_metrics: EdgeMetrics object that stores data for a user/edge. Returns: contrast_multiplier: perceive_score float in [0, 1]\n- def _get_contrast_multiplier(web_access, el, edge_metrics, reg_text_cutoff=4.5, large_text_cutoff=3.0): Returns the contrast_multiplier for a given element on a page. Uses AA WCAG 2 standards: https://webaim.org/articles/contrast/ Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to reg_text_cutoff: min contrast ratio for regular-sized text large_text_cutoff min contrast ratio for large text Returns: contrast_multiplier: 0.0 or 1.0, representing complaince with WCAG contrast_ratio: contrast ratio used by WCAG\n- def _get_size_multiplier(web_access, el, edge_metrics, min_pixels=44): Returns the size_multiplier for a given element on a page. Uses AAA WCAG 2: https://www.w3.org/WAI/WCAG21/quickref/#target-size Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to min_pixels: Number of pixels to an element must be in each dimension Returns: size_multiplier: 0.0 or 1.0, representing complaince with WCAG size_xy: tuple (x, y), representing the width, height of the el\n- def describe(self, web_access, el): Describes an element based on its innerText, value, type, id, and any labels if they are perceivable. Args: web_access: Web access to the user interface for retrieving actionable elements on a web page. el: WebAccess::Element representing the element to find dist to Returns: A list containing the innerText, value, type, id, and any perceivable labels of an element.", "prompted_full_text": "Implement the Python class `VisionAbility` described below.\n\nClass description:\nThis class represents a user with normal vision. It can serve as a base class for other vision-related components.\n\nMethod signatures and docstrings:\n- def score_perceive(self, web_access, el, edge_metrics): Evaluate the user's ability to perceive el, based on contrast & size Args: web_access: Web interface to interact with the browser el: A particular element on this interface. edge_metrics: EdgeMetrics object that stores data for a user/edge. Returns: contrast_multiplier: perceive_score float in [0, 1]\n- def _get_contrast_multiplier(web_access, el, edge_metrics, reg_text_cutoff=4.5, large_text_cutoff=3.0): Returns the contrast_multiplier for a given element on a page. Uses AA WCAG 2 standards: https://webaim.org/articles/contrast/ Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to reg_text_cutoff: min contrast ratio for regular-sized text large_text_cutoff min contrast ratio for large text Returns: contrast_multiplier: 0.0 or 1.0, representing complaince with WCAG contrast_ratio: contrast ratio used by WCAG\n- def _get_size_multiplier(web_access, el, edge_metrics, min_pixels=44): Returns the size_multiplier for a given element on a page. Uses AAA WCAG 2: https://www.w3.org/WAI/WCAG21/quickref/#target-size Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to min_pixels: Number of pixels to an element must be in each dimension Returns: size_multiplier: 0.0 or 1.0, representing complaince with WCAG size_xy: tuple (x, y), representing the width, height of the el\n- def describe(self, web_access, el): Describes an element based on its innerText, value, type, id, and any labels if they are perceivable. Args: web_access: Web access to the user interface for retrieving actionable elements on a web page. el: WebAccess::Element representing the element to find dist to Returns: A list containing the innerText, value, type, id, and any perceivable labels of an element.\n\n<|skeleton|>\nclass VisionAbility:\n \"\"\"This class represents a user with normal vision. It can serve as a base class for other vision-related components.\"\"\"\n\n def score_perceive(self, web_access, el, edge_metrics):\n \"\"\"Evaluate the user's ability to perceive el, based on contrast & size Args: web_access: Web interface to interact with the browser el: A particular element on this interface. edge_metrics: EdgeMetrics object that stores data for a user/edge. Returns: contrast_multiplier: perceive_score float in [0, 1]\"\"\"\n <|body_0|>\n\n def _get_contrast_multiplier(web_access, el, edge_metrics, reg_text_cutoff=4.5, large_text_cutoff=3.0):\n \"\"\"Returns the contrast_multiplier for a given element on a page. Uses AA WCAG 2 standards: https://webaim.org/articles/contrast/ Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to reg_text_cutoff: min contrast ratio for regular-sized text large_text_cutoff min contrast ratio for large text Returns: contrast_multiplier: 0.0 or 1.0, representing complaince with WCAG contrast_ratio: contrast ratio used by WCAG\"\"\"\n <|body_1|>\n\n def _get_size_multiplier(web_access, el, edge_metrics, min_pixels=44):\n \"\"\"Returns the size_multiplier for a given element on a page. Uses AAA WCAG 2: https://www.w3.org/WAI/WCAG21/quickref/#target-size Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to min_pixels: Number of pixels to an element must be in each dimension Returns: size_multiplier: 0.0 or 1.0, representing complaince with WCAG size_xy: tuple (x, y), representing the width, height of the el\"\"\"\n <|body_2|>\n\n def describe(self, web_access, el):\n \"\"\"Describes an element based on its innerText, value, type, id, and any labels if they are perceivable. Args: web_access: Web access to the user interface for retrieving actionable elements on a web page. el: WebAccess::Element representing the element to find dist to Returns: A list containing the innerText, value, type, id, and any perceivable labels of an element.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n c_multiplier, c_ratio = self._get_contrast_multiplier(web_access, el, edge_metrics)\n edge_metrics.contrast_ratio = c_ratio\n if c_multiplier == 0:\n return 0.0\n size_multiplier, size = self._get_size_multiplier(web_access, el, edge_metrics)\n edge_metrics.size = size\n return size_multiplier * c_multiplier\n<|end_body_0|>\n\n<|body_start_1|>\n contrast_ratio = edge_metrics.build_data.contrast_ratio(web_access, None, el)\n font_size = edge_metrics.build_data.font_size(web_access, None, el)\n if font_size < 18:\n if contrast_ratio >= reg_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n elif contrast_ratio >= large_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n<|end_body_1|>\n\n<|body_start_2|>\n exception_tags = ['li', 'ul']\n height = edge_metrics.build_data.height(web_access, None, el)\n width = edge_metrics.build_data.width(web_access, None, el)\n tag_name = edge_metrics.build_data.tag_name(web_access, None, el)\n if tag_name in exception_tags or (height >= min_pixels and width >= min_pixels):\n return (1.0, (width, height))\n else:\n return (0.0, (width, height))\n<|end_body_2|>\n\n<|body_start_3|>\n tags = set()\n tags.add(web_access.get_selenium_element(el).get_attribute('innerText'))\n tags.add(web_access.get_selenium_element(el).get_attribute('value'))\n tags.add(web_access.get_selenium_element(el).get_attribute('type'))\n field_id = web_access.get_selenium_element(el).get_attribute('id')\n if field_id is not None:\n labels = web_access.query_xpath('//label[@for=\"%s\"]' % field_id)\n if labels is not None:\n for label in labels:\n if self.score_perceive(web_access, label, web_access._create_edge_metrics()) > 0.0:\n t = web_access.get_selenium_element(label).get_attribute('innerText')\n if t is not None:\n tags.add(t)\n return tags\n<|end_body_3|>\n", "revision_id": "32d80d94d1b5402ba70f7c9bb4ae2704456dc236", "skeleton": "<|skeleton|>\nclass VisionAbility:\n \"\"\"This class represents a user with normal vision. It can serve as a base class for other vision-related components.\"\"\"\n\n def score_perceive(self, web_access, el, edge_metrics):\n \"\"\"Evaluate the user's ability to perceive el, based on contrast & size Args: web_access: Web interface to interact with the browser el: A particular element on this interface. edge_metrics: EdgeMetrics object that stores data for a user/edge. Returns: contrast_multiplier: perceive_score float in [0, 1]\"\"\"\n <|body_0|>\n\n def _get_contrast_multiplier(web_access, el, edge_metrics, reg_text_cutoff=4.5, large_text_cutoff=3.0):\n \"\"\"Returns the contrast_multiplier for a given element on a page. Uses AA WCAG 2 standards: https://webaim.org/articles/contrast/ Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to reg_text_cutoff: min contrast ratio for regular-sized text large_text_cutoff min contrast ratio for large text Returns: contrast_multiplier: 0.0 or 1.0, representing complaince with WCAG contrast_ratio: contrast ratio used by WCAG\"\"\"\n <|body_1|>\n\n def _get_size_multiplier(web_access, el, edge_metrics, min_pixels=44):\n \"\"\"Returns the size_multiplier for a given element on a page. Uses AAA WCAG 2: https://www.w3.org/WAI/WCAG21/quickref/#target-size Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to min_pixels: Number of pixels to an element must be in each dimension Returns: size_multiplier: 0.0 or 1.0, representing complaince with WCAG size_xy: tuple (x, y), representing the width, height of the el\"\"\"\n <|body_2|>\n\n def describe(self, web_access, el):\n \"\"\"Describes an element based on its innerText, value, type, id, and any labels if they are perceivable. Args: web_access: Web access to the user interface for retrieving actionable elements on a web page. el: WebAccess::Element representing the element to find dist to Returns: A list containing the innerText, value, type, id, and any perceivable labels of an element.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class VisionAbility:\n \"\"\"This class represents a user with normal vision. It can serve as a base class for other vision-related components.\"\"\"\n\n def score_perceive(self, web_access, el, edge_metrics):\n \"\"\"Evaluate the user's ability to perceive el, based on contrast & size Args: web_access: Web interface to interact with the browser el: A particular element on this interface. edge_metrics: EdgeMetrics object that stores data for a user/edge. Returns: contrast_multiplier: perceive_score float in [0, 1]\"\"\"\n c_multiplier, c_ratio = self._get_contrast_multiplier(web_access, el, edge_metrics)\n edge_metrics.contrast_ratio = c_ratio\n if c_multiplier == 0:\n return 0.0\n size_multiplier, size = self._get_size_multiplier(web_access, el, edge_metrics)\n edge_metrics.size = size\n return size_multiplier * c_multiplier\n\n def _get_contrast_multiplier(web_access, el, edge_metrics, reg_text_cutoff=4.5, large_text_cutoff=3.0):\n \"\"\"Returns the contrast_multiplier for a given element on a page. Uses AA WCAG 2 standards: https://webaim.org/articles/contrast/ Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to reg_text_cutoff: min contrast ratio for regular-sized text large_text_cutoff min contrast ratio for large text Returns: contrast_multiplier: 0.0 or 1.0, representing complaince with WCAG contrast_ratio: contrast ratio used by WCAG\"\"\"\n contrast_ratio = edge_metrics.build_data.contrast_ratio(web_access, None, el)\n font_size = edge_metrics.build_data.font_size(web_access, None, el)\n if font_size < 18:\n if contrast_ratio >= reg_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n elif contrast_ratio >= large_text_cutoff:\n return (1.0, contrast_ratio)\n else:\n return (0.0, contrast_ratio)\n\n def _get_size_multiplier(web_access, el, edge_metrics, min_pixels=44):\n \"\"\"Returns the size_multiplier for a given element on a page. Uses AAA WCAG 2: https://www.w3.org/WAI/WCAG21/quickref/#target-size Args: web_access: Web interface to interact with the browser el: WebAccess::Element representing the element to find dist to min_pixels: Number of pixels to an element must be in each dimension Returns: size_multiplier: 0.0 or 1.0, representing complaince with WCAG size_xy: tuple (x, y), representing the width, height of the el\"\"\"\n exception_tags = ['li', 'ul']\n height = edge_metrics.build_data.height(web_access, None, el)\n width = edge_metrics.build_data.width(web_access, None, el)\n tag_name = edge_metrics.build_data.tag_name(web_access, None, el)\n if tag_name in exception_tags or (height >= min_pixels and width >= min_pixels):\n return (1.0, (width, height))\n else:\n return (0.0, (width, height))\n\n def describe(self, web_access, el):\n \"\"\"Describes an element based on its innerText, value, type, id, and any labels if they are perceivable. Args: web_access: Web access to the user interface for retrieving actionable elements on a web page. el: WebAccess::Element representing the element to find dist to Returns: A list containing the innerText, value, type, id, and any perceivable labels of an element.\"\"\"\n tags = set()\n tags.add(web_access.get_selenium_element(el).get_attribute('innerText'))\n tags.add(web_access.get_selenium_element(el).get_attribute('value'))\n tags.add(web_access.get_selenium_element(el).get_attribute('type'))\n field_id = web_access.get_selenium_element(el).get_attribute('id')\n if field_id is not None:\n labels = web_access.query_xpath('//label[@for=\"%s\"]' % field_id)\n if labels is not None:\n for label in labels:\n if self.score_perceive(web_access, label, web_access._create_edge_metrics()) > 0.0:\n t = web_access.get_selenium_element(label).get_attribute('innerText')\n if t is not None:\n tags.add(t)\n return tags\n", "source": "the_stack_v2_python_sparse", "source_path": "demodocusfw/web/accessibility/ability.py", "source_repo": "WilcoFiers/demodocus", "split": "test", "star_events_count": 0} {"blob_id": "e90faba1719dc1d56f83bb6a9c9764298a6b8067", "bodies": ["ObjectManager.__init__(self)\nself.getters.update({'description': 'get_general', 'name': 'get_general', 'post': 'get_foreign_key'})\nself.setters.update({'description': 'set_general', 'name': 'set_general', 'post': 'set_foreign_key'})\nself.my_django_model = facade.models.ForumPostAttachment\nself.setter = facade.subsystems.Setter", "if optional_attributes is None:\n optional_attributes = dict()\nnew_attachment = self.my_django_model(name=name)\nnew_attachment.blame = facade.managers.BlameManager().create(auth_token)\nnew_attachment.post = self._find_by_id(post, facade.models.ForumPost)\nnew_attachment.save()\nself.authorizer.check_create_permissions(auth_token, new_attachment)\nreturn new_attachment"], "bodies_text": "<|body_start_0|>\n ObjectManager.__init__(self)\n self.getters.update({'description': 'get_general', 'name': 'get_general', 'post': 'get_foreign_key'})\n self.setters.update({'description': 'set_general', 'name': 'set_general', 'post': 'set_foreign_key'})\n self.my_django_model = facade.models.ForumPostAttachment\n self.setter = facade.subsystems.Setter\n<|end_body_0|>\n\n<|body_start_1|>\n if optional_attributes is None:\n optional_attributes = dict()\n new_attachment = self.my_django_model(name=name)\n new_attachment.blame = facade.managers.BlameManager().create(auth_token)\n new_attachment.post = self._find_by_id(post, facade.models.ForumPost)\n new_attachment.save()\n self.authorizer.check_create_permissions(auth_token, new_attachment)\n return new_attachment\n<|end_body_1|>\n", "class_docstring": "Manage Attachments in the Power Reg Forum system", "class_name": "ForumPostAttachmentManager", "detected_licenses": ["BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ForumPostAttachmentManager:\n \"\"\"Manage Attachments in the Power Reg Forum system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, name, post, optional_attributes=None):\n \"\"\"Create a new Attachment @param name name of the Attachment @type name string @param post post FK @type post int @return a reference to the newly created Attachment\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ObjectManager.__init__(self)\n self.getters.update({'description': 'get_general', 'name': 'get_general', 'post': 'get_foreign_key'})\n self.setters.update({'description': 'set_general', 'name': 'set_general', 'post': 'set_foreign_key'})\n self.my_django_model = facade.models.ForumPostAttachment\n self.setter = facade.subsystems.Setter\n<|end_body_0|>\n\n<|body_start_1|>\n if optional_attributes is None:\n optional_attributes = dict()\n new_attachment = self.my_django_model(name=name)\n new_attachment.blame = facade.managers.BlameManager().create(auth_token)\n new_attachment.post = self._find_by_id(post, facade.models.ForumPost)\n new_attachment.save()\n self.authorizer.check_create_permissions(auth_token, new_attachment)\n return new_attachment\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000422", "length_bytes": 1653, "license_type": "permissive", "methods": [{"docstring": "constructor", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Create a new Attachment @param name name of the Attachment @type name string @param post post FK @type post int @return a reference to the newly created Attachment", "name": "create", "signature": "def create(self, auth_token, name, post, optional_attributes=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_007331", "prompt": "Implement the Python class `ForumPostAttachmentManager` described below.\n\nClass description:\nManage Attachments in the Power Reg Forum system\n\nMethod signatures and docstrings:\n- def __init__(self): constructor\n- def create(self, auth_token, name, post, optional_attributes=None): Create a new Attachment @param name name of the Attachment @type name string @param post post FK @type post int @return a reference to the newly created Attachment", "prompted_full_text": "Implement the Python class `ForumPostAttachmentManager` described below.\n\nClass description:\nManage Attachments in the Power Reg Forum system\n\nMethod signatures and docstrings:\n- def __init__(self): constructor\n- def create(self, auth_token, name, post, optional_attributes=None): Create a new Attachment @param name name of the Attachment @type name string @param post post FK @type post int @return a reference to the newly created Attachment\n\n<|skeleton|>\nclass ForumPostAttachmentManager:\n \"\"\"Manage Attachments in the Power Reg Forum system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, name, post, optional_attributes=None):\n \"\"\"Create a new Attachment @param name name of the Attachment @type name string @param post post FK @type post int @return a reference to the newly created Attachment\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n ObjectManager.__init__(self)\n self.getters.update({'description': 'get_general', 'name': 'get_general', 'post': 'get_foreign_key'})\n self.setters.update({'description': 'set_general', 'name': 'set_general', 'post': 'set_foreign_key'})\n self.my_django_model = facade.models.ForumPostAttachment\n self.setter = facade.subsystems.Setter\n<|end_body_0|>\n\n<|body_start_1|>\n if optional_attributes is None:\n optional_attributes = dict()\n new_attachment = self.my_django_model(name=name)\n new_attachment.blame = facade.managers.BlameManager().create(auth_token)\n new_attachment.post = self._find_by_id(post, facade.models.ForumPost)\n new_attachment.save()\n self.authorizer.check_create_permissions(auth_token, new_attachment)\n return new_attachment\n<|end_body_1|>\n", "revision_id": "a59457bc37f0501aea1f54d006a6de94ff80511c", "skeleton": "<|skeleton|>\nclass ForumPostAttachmentManager:\n \"\"\"Manage Attachments in the Power Reg Forum system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n <|body_0|>\n\n def create(self, auth_token, name, post, optional_attributes=None):\n \"\"\"Create a new Attachment @param name name of the Attachment @type name string @param post post FK @type post int @return a reference to the newly created Attachment\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ForumPostAttachmentManager:\n \"\"\"Manage Attachments in the Power Reg Forum system\"\"\"\n\n def __init__(self):\n \"\"\"constructor\"\"\"\n ObjectManager.__init__(self)\n self.getters.update({'description': 'get_general', 'name': 'get_general', 'post': 'get_foreign_key'})\n self.setters.update({'description': 'set_general', 'name': 'set_general', 'post': 'set_foreign_key'})\n self.my_django_model = facade.models.ForumPostAttachment\n self.setter = facade.subsystems.Setter\n\n def create(self, auth_token, name, post, optional_attributes=None):\n \"\"\"Create a new Attachment @param name name of the Attachment @type name string @param post post FK @type post int @return a reference to the newly created Attachment\"\"\"\n if optional_attributes is None:\n optional_attributes = dict()\n new_attachment = self.my_django_model(name=name)\n new_attachment.blame = facade.managers.BlameManager().create(auth_token)\n new_attachment.post = self._find_by_id(post, facade.models.ForumPost)\n new_attachment.save()\n self.authorizer.check_create_permissions(auth_token, new_attachment)\n return new_attachment\n", "source": "the_stack_v2_python_sparse", "source_path": "forum/managers/post_attachment.py", "source_repo": "ninemoreminutes/openassign-server", "split": "test", "star_events_count": 0} {"blob_id": "76099d1e141be2c47a5a0c71a385c426959eec49", "bodies": ["self.project = project\nself.previously_indexed = []\nself.logger = logging.getLogger(__name__)\nself.project_logger = ProjectLogger(self.logger, project)", "without_stops = []\nfor word in words:\n if word.word.lower() not in app.config['STOPWORDS']:\n without_stops.append(word)\nreturn without_stops", "sequences = []\nfor i in range(0, len(sentence.words)):\n self.previously_indexed = []\n for j in range(i + 1, len(sentence.words) + 1):\n if j - i < 5:\n sequences.extend(self.get_sequence(sentence, i, j))\nif isinstance(sequence_dict, dict):\n for sequence in sequences:\n sequence_text = sequence['sequence']\n lemmatized = sequence['is_lemmatized']\n has_function_words = sequence['has_function_words']\n all_function_words = sequence['all_function_words']\n length = len(sequence['words'])\n position = sequence['start_position']\n key = sequence_text\n if key in sequence_dict.keys():\n sequence = sequence_dict[key]\n else:\n try:\n sequence = Sequence.query.filter_by(sequence=sequence_text).one()\n except MultipleResultsFound:\n self.project_logger.error('Duplicate records found for: %s', str(key))\n except NoResultFound:\n sequence = Sequence(sequence=sequence_text, lemmatized=lemmatized, has_function_words=has_function_words, all_function_words=all_function_words, length=length)\n sequence_dict[key] = sequence\n sentence.add_sequence(sequence=sequence, position=position, project=self.project, force=False)\nreturn sequences", "sequences = []\nwordlist = sentence.words[i:j]\nlemmatized_phrase = join_tws(wordlist, ' ', 'lemma')\nsurface_phrase = join_tws(wordlist, ' ', 'word')\nif surface_phrase in self.previously_indexed:\n return sequences\nwordlist_nostops = self.remove_stops(wordlist)\nlemmatized_phrase_nostops = join_tws(wordlist_nostops, ' ', LEMMA)\nsurface_phrase_nostops = join_tws(wordlist_nostops, ' ', WORD)\nhas_stops = len(wordlist_nostops) < len(wordlist)\nlemmatized_has_stops = len(lemmatized_phrase_nostops) < len(lemmatized_phrase)\nall_stop_words = len(wordlist_nostops) == 0\nlemmatized_all_stop_words = len(lemmatized_phrase_nostops) == 0\nsequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase, 'is_lemmatized': False, 'has_function_words': has_stops, 'all_function_words': all_stop_words, 'words': wordlist})\nself.previously_indexed.append(surface_phrase)\nif has_stops and (not all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not surface_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase_nostops, 'is_lemmatized': False, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n self.previously_indexed.append(surface_phrase_nostops)\nsequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase, 'is_lemmatized': True, 'has_function_words': lemmatized_has_stops, 'all_function_words': lemmatized_all_stop_words, 'words': wordlist})\nself.previously_indexed.append(lemmatized_phrase)\nif lemmatized_has_stops and (not lemmatized_all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not lemmatized_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase_nostops, 'is_lemmatized': True, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\nreturn sequences"], "bodies_text": "<|body_start_0|>\n self.project = project\n self.previously_indexed = []\n self.logger = logging.getLogger(__name__)\n self.project_logger = ProjectLogger(self.logger, project)\n<|end_body_0|>\n\n<|body_start_1|>\n without_stops = []\n for word in words:\n if word.word.lower() not in app.config['STOPWORDS']:\n without_stops.append(word)\n return without_stops\n<|end_body_1|>\n\n<|body_start_2|>\n sequences = []\n for i in range(0, len(sentence.words)):\n self.previously_indexed = []\n for j in range(i + 1, len(sentence.words) + 1):\n if j - i < 5:\n sequences.extend(self.get_sequence(sentence, i, j))\n if isinstance(sequence_dict, dict):\n for sequence in sequences:\n sequence_text = sequence['sequence']\n lemmatized = sequence['is_lemmatized']\n has_function_words = sequence['has_function_words']\n all_function_words = sequence['all_function_words']\n length = len(sequence['words'])\n position = sequence['start_position']\n key = sequence_text\n if key in sequence_dict.keys():\n sequence = sequence_dict[key]\n else:\n try:\n sequence = Sequence.query.filter_by(sequence=sequence_text).one()\n except MultipleResultsFound:\n self.project_logger.error('Duplicate records found for: %s', str(key))\n except NoResultFound:\n sequence = Sequence(sequence=sequence_text, lemmatized=lemmatized, has_function_words=has_function_words, all_function_words=all_function_words, length=length)\n sequence_dict[key] = sequence\n sentence.add_sequence(sequence=sequence, position=position, project=self.project, force=False)\n return sequences\n<|end_body_2|>\n\n<|body_start_3|>\n sequences = []\n wordlist = sentence.words[i:j]\n lemmatized_phrase = join_tws(wordlist, ' ', 'lemma')\n surface_phrase = join_tws(wordlist, ' ', 'word')\n if surface_phrase in self.previously_indexed:\n return sequences\n wordlist_nostops = self.remove_stops(wordlist)\n lemmatized_phrase_nostops = join_tws(wordlist_nostops, ' ', LEMMA)\n surface_phrase_nostops = join_tws(wordlist_nostops, ' ', WORD)\n has_stops = len(wordlist_nostops) < len(wordlist)\n lemmatized_has_stops = len(lemmatized_phrase_nostops) < len(lemmatized_phrase)\n all_stop_words = len(wordlist_nostops) == 0\n lemmatized_all_stop_words = len(lemmatized_phrase_nostops) == 0\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase, 'is_lemmatized': False, 'has_function_words': has_stops, 'all_function_words': all_stop_words, 'words': wordlist})\n self.previously_indexed.append(surface_phrase)\n if has_stops and (not all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not surface_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase_nostops, 'is_lemmatized': False, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n self.previously_indexed.append(surface_phrase_nostops)\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase, 'is_lemmatized': True, 'has_function_words': lemmatized_has_stops, 'all_function_words': lemmatized_all_stop_words, 'words': wordlist})\n self.previously_indexed.append(lemmatized_phrase)\n if lemmatized_has_stops and (not lemmatized_all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not lemmatized_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase_nostops, 'is_lemmatized': True, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n return sequences\n<|end_body_3|>\n", "class_docstring": "Process given input into Sequences.", "class_name": "SequenceProcessor", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SequenceProcessor:\n \"\"\"Process given input into Sequences.\"\"\"\n\n def __init__(self, project):\n \"\"\"Set up local variables for the SequenceProcessor.\"\"\"\n <|body_0|>\n\n def remove_stops(self, words):\n \"\"\"Remove every sort of stop from the sentences. :param list words: A list of TaggedWord objects. :return list: The list without stops.\"\"\"\n <|body_1|>\n\n def process(self, sentence, sequence_dict=None):\n \"\"\"Iterate and record every four word long sequence. The method records using the ReaderWriter a list of sequences present in the given sentence. :param Sentence sentence: The sentence to process, :return list: A list of Sequence objects, representing the results of processing. These sequences are also sent to the ReaderWriter.\"\"\"\n <|body_2|>\n\n def get_sequence(self, sentence, i, j):\n \"\"\"Handle the main processing part in the process() loop. :param Sentence sentence: A sentence object to create sequences from. :param int i: The index to start the sequence from, inclusive. :param int j: The index to stop the sequence from, exclusive. :return list: A list of dicts representing sequences.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.project = project\n self.previously_indexed = []\n self.logger = logging.getLogger(__name__)\n self.project_logger = ProjectLogger(self.logger, project)\n<|end_body_0|>\n\n<|body_start_1|>\n without_stops = []\n for word in words:\n if word.word.lower() not in app.config['STOPWORDS']:\n without_stops.append(word)\n return without_stops\n<|end_body_1|>\n\n<|body_start_2|>\n sequences = []\n for i in range(0, len(sentence.words)):\n self.previously_indexed = []\n for j in range(i + 1, len(sentence.words) + 1):\n if j - i < 5:\n sequences.extend(self.get_sequence(sentence, i, j))\n if isinstance(sequence_dict, dict):\n for sequence in sequences:\n sequence_text = sequence['sequence']\n lemmatized = sequence['is_lemmatized']\n has_function_words = sequence['has_function_words']\n all_function_words = sequence['all_function_words']\n length = len(sequence['words'])\n position = sequence['start_position']\n key = sequence_text\n if key in sequence_dict.keys():\n sequence = sequence_dict[key]\n else:\n try:\n sequence = Sequence.query.filter_by(sequence=sequence_text).one()\n except MultipleResultsFound:\n self.project_logger.error('Duplicate records found for: %s', str(key))\n except NoResultFound:\n sequence = Sequence(sequence=sequence_text, lemmatized=lemmatized, has_function_words=has_function_words, all_function_words=all_function_words, length=length)\n sequence_dict[key] = sequence\n sentence.add_sequence(sequence=sequence, position=position, project=self.project, force=False)\n return sequences\n<|end_body_2|>\n\n<|body_start_3|>\n sequences = []\n wordlist = sentence.words[i:j]\n lemmatized_phrase = join_tws(wordlist, ' ', 'lemma')\n surface_phrase = join_tws(wordlist, ' ', 'word')\n if surface_phrase in self.previously_indexed:\n return sequences\n wordlist_nostops = self.remove_stops(wordlist)\n lemmatized_phrase_nostops = join_tws(wordlist_nostops, ' ', LEMMA)\n surface_phrase_nostops = join_tws(wordlist_nostops, ' ', WORD)\n has_stops = len(wordlist_nostops) < len(wordlist)\n lemmatized_has_stops = len(lemmatized_phrase_nostops) < len(lemmatized_phrase)\n all_stop_words = len(wordlist_nostops) == 0\n lemmatized_all_stop_words = len(lemmatized_phrase_nostops) == 0\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase, 'is_lemmatized': False, 'has_function_words': has_stops, 'all_function_words': all_stop_words, 'words': wordlist})\n self.previously_indexed.append(surface_phrase)\n if has_stops and (not all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not surface_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase_nostops, 'is_lemmatized': False, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n self.previously_indexed.append(surface_phrase_nostops)\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase, 'is_lemmatized': True, 'has_function_words': lemmatized_has_stops, 'all_function_words': lemmatized_all_stop_words, 'words': wordlist})\n self.previously_indexed.append(lemmatized_phrase)\n if lemmatized_has_stops and (not lemmatized_all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not lemmatized_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase_nostops, 'is_lemmatized': True, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n return sequences\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000423", "length_bytes": 8706, "license_type": "no_license", "methods": [{"docstring": "Set up local variables for the SequenceProcessor.", "name": "__init__", "signature": "def __init__(self, project)"}, {"docstring": "Remove every sort of stop from the sentences. :param list words: A list of TaggedWord objects. :return list: The list without stops.", "name": "remove_stops", "signature": "def remove_stops(self, words)"}, {"docstring": "Iterate and record every four word long sequence. The method records using the ReaderWriter a list of sequences present in the given sentence. :param Sentence sentence: The sentence to process, :return list: A list of Sequence objects, representing the results of processing. These sequences are also sent to the ReaderWriter.", "name": "process", "signature": "def process(self, sentence, sequence_dict=None)"}, {"docstring": "Handle the main processing part in the process() loop. :param Sentence sentence: A sentence object to create sequences from. :param int i: The index to start the sequence from, inclusive. :param int j: The index to stop the sequence from, exclusive. :return list: A list of dicts representing sequences.", "name": "get_sequence", "signature": "def get_sequence(self, sentence, i, j)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_042504", "prompt": "Implement the Python class `SequenceProcessor` described below.\n\nClass description:\nProcess given input into Sequences.\n\nMethod signatures and docstrings:\n- def __init__(self, project): Set up local variables for the SequenceProcessor.\n- def remove_stops(self, words): Remove every sort of stop from the sentences. :param list words: A list of TaggedWord objects. :return list: The list without stops.\n- def process(self, sentence, sequence_dict=None): Iterate and record every four word long sequence. The method records using the ReaderWriter a list of sequences present in the given sentence. :param Sentence sentence: The sentence to process, :return list: A list of Sequence objects, representing the results of processing. These sequences are also sent to the ReaderWriter.\n- def get_sequence(self, sentence, i, j): Handle the main processing part in the process() loop. :param Sentence sentence: A sentence object to create sequences from. :param int i: The index to start the sequence from, inclusive. :param int j: The index to stop the sequence from, exclusive. :return list: A list of dicts representing sequences.", "prompted_full_text": "Implement the Python class `SequenceProcessor` described below.\n\nClass description:\nProcess given input into Sequences.\n\nMethod signatures and docstrings:\n- def __init__(self, project): Set up local variables for the SequenceProcessor.\n- def remove_stops(self, words): Remove every sort of stop from the sentences. :param list words: A list of TaggedWord objects. :return list: The list without stops.\n- def process(self, sentence, sequence_dict=None): Iterate and record every four word long sequence. The method records using the ReaderWriter a list of sequences present in the given sentence. :param Sentence sentence: The sentence to process, :return list: A list of Sequence objects, representing the results of processing. These sequences are also sent to the ReaderWriter.\n- def get_sequence(self, sentence, i, j): Handle the main processing part in the process() loop. :param Sentence sentence: A sentence object to create sequences from. :param int i: The index to start the sequence from, inclusive. :param int j: The index to stop the sequence from, exclusive. :return list: A list of dicts representing sequences.\n\n<|skeleton|>\nclass SequenceProcessor:\n \"\"\"Process given input into Sequences.\"\"\"\n\n def __init__(self, project):\n \"\"\"Set up local variables for the SequenceProcessor.\"\"\"\n <|body_0|>\n\n def remove_stops(self, words):\n \"\"\"Remove every sort of stop from the sentences. :param list words: A list of TaggedWord objects. :return list: The list without stops.\"\"\"\n <|body_1|>\n\n def process(self, sentence, sequence_dict=None):\n \"\"\"Iterate and record every four word long sequence. The method records using the ReaderWriter a list of sequences present in the given sentence. :param Sentence sentence: The sentence to process, :return list: A list of Sequence objects, representing the results of processing. These sequences are also sent to the ReaderWriter.\"\"\"\n <|body_2|>\n\n def get_sequence(self, sentence, i, j):\n \"\"\"Handle the main processing part in the process() loop. :param Sentence sentence: A sentence object to create sequences from. :param int i: The index to start the sequence from, inclusive. :param int j: The index to stop the sequence from, exclusive. :return list: A list of dicts representing sequences.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.project = project\n self.previously_indexed = []\n self.logger = logging.getLogger(__name__)\n self.project_logger = ProjectLogger(self.logger, project)\n<|end_body_0|>\n\n<|body_start_1|>\n without_stops = []\n for word in words:\n if word.word.lower() not in app.config['STOPWORDS']:\n without_stops.append(word)\n return without_stops\n<|end_body_1|>\n\n<|body_start_2|>\n sequences = []\n for i in range(0, len(sentence.words)):\n self.previously_indexed = []\n for j in range(i + 1, len(sentence.words) + 1):\n if j - i < 5:\n sequences.extend(self.get_sequence(sentence, i, j))\n if isinstance(sequence_dict, dict):\n for sequence in sequences:\n sequence_text = sequence['sequence']\n lemmatized = sequence['is_lemmatized']\n has_function_words = sequence['has_function_words']\n all_function_words = sequence['all_function_words']\n length = len(sequence['words'])\n position = sequence['start_position']\n key = sequence_text\n if key in sequence_dict.keys():\n sequence = sequence_dict[key]\n else:\n try:\n sequence = Sequence.query.filter_by(sequence=sequence_text).one()\n except MultipleResultsFound:\n self.project_logger.error('Duplicate records found for: %s', str(key))\n except NoResultFound:\n sequence = Sequence(sequence=sequence_text, lemmatized=lemmatized, has_function_words=has_function_words, all_function_words=all_function_words, length=length)\n sequence_dict[key] = sequence\n sentence.add_sequence(sequence=sequence, position=position, project=self.project, force=False)\n return sequences\n<|end_body_2|>\n\n<|body_start_3|>\n sequences = []\n wordlist = sentence.words[i:j]\n lemmatized_phrase = join_tws(wordlist, ' ', 'lemma')\n surface_phrase = join_tws(wordlist, ' ', 'word')\n if surface_phrase in self.previously_indexed:\n return sequences\n wordlist_nostops = self.remove_stops(wordlist)\n lemmatized_phrase_nostops = join_tws(wordlist_nostops, ' ', LEMMA)\n surface_phrase_nostops = join_tws(wordlist_nostops, ' ', WORD)\n has_stops = len(wordlist_nostops) < len(wordlist)\n lemmatized_has_stops = len(lemmatized_phrase_nostops) < len(lemmatized_phrase)\n all_stop_words = len(wordlist_nostops) == 0\n lemmatized_all_stop_words = len(lemmatized_phrase_nostops) == 0\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase, 'is_lemmatized': False, 'has_function_words': has_stops, 'all_function_words': all_stop_words, 'words': wordlist})\n self.previously_indexed.append(surface_phrase)\n if has_stops and (not all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not surface_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase_nostops, 'is_lemmatized': False, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n self.previously_indexed.append(surface_phrase_nostops)\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase, 'is_lemmatized': True, 'has_function_words': lemmatized_has_stops, 'all_function_words': lemmatized_all_stop_words, 'words': wordlist})\n self.previously_indexed.append(lemmatized_phrase)\n if lemmatized_has_stops and (not lemmatized_all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not lemmatized_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase_nostops, 'is_lemmatized': True, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n return sequences\n<|end_body_3|>\n", "revision_id": "93b90e6a8592a26c6efa09ea5f5aa4fab044f9d7", "skeleton": "<|skeleton|>\nclass SequenceProcessor:\n \"\"\"Process given input into Sequences.\"\"\"\n\n def __init__(self, project):\n \"\"\"Set up local variables for the SequenceProcessor.\"\"\"\n <|body_0|>\n\n def remove_stops(self, words):\n \"\"\"Remove every sort of stop from the sentences. :param list words: A list of TaggedWord objects. :return list: The list without stops.\"\"\"\n <|body_1|>\n\n def process(self, sentence, sequence_dict=None):\n \"\"\"Iterate and record every four word long sequence. The method records using the ReaderWriter a list of sequences present in the given sentence. :param Sentence sentence: The sentence to process, :return list: A list of Sequence objects, representing the results of processing. These sequences are also sent to the ReaderWriter.\"\"\"\n <|body_2|>\n\n def get_sequence(self, sentence, i, j):\n \"\"\"Handle the main processing part in the process() loop. :param Sentence sentence: A sentence object to create sequences from. :param int i: The index to start the sequence from, inclusive. :param int j: The index to stop the sequence from, exclusive. :return list: A list of dicts representing sequences.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SequenceProcessor:\n \"\"\"Process given input into Sequences.\"\"\"\n\n def __init__(self, project):\n \"\"\"Set up local variables for the SequenceProcessor.\"\"\"\n self.project = project\n self.previously_indexed = []\n self.logger = logging.getLogger(__name__)\n self.project_logger = ProjectLogger(self.logger, project)\n\n def remove_stops(self, words):\n \"\"\"Remove every sort of stop from the sentences. :param list words: A list of TaggedWord objects. :return list: The list without stops.\"\"\"\n without_stops = []\n for word in words:\n if word.word.lower() not in app.config['STOPWORDS']:\n without_stops.append(word)\n return without_stops\n\n def process(self, sentence, sequence_dict=None):\n \"\"\"Iterate and record every four word long sequence. The method records using the ReaderWriter a list of sequences present in the given sentence. :param Sentence sentence: The sentence to process, :return list: A list of Sequence objects, representing the results of processing. These sequences are also sent to the ReaderWriter.\"\"\"\n sequences = []\n for i in range(0, len(sentence.words)):\n self.previously_indexed = []\n for j in range(i + 1, len(sentence.words) + 1):\n if j - i < 5:\n sequences.extend(self.get_sequence(sentence, i, j))\n if isinstance(sequence_dict, dict):\n for sequence in sequences:\n sequence_text = sequence['sequence']\n lemmatized = sequence['is_lemmatized']\n has_function_words = sequence['has_function_words']\n all_function_words = sequence['all_function_words']\n length = len(sequence['words'])\n position = sequence['start_position']\n key = sequence_text\n if key in sequence_dict.keys():\n sequence = sequence_dict[key]\n else:\n try:\n sequence = Sequence.query.filter_by(sequence=sequence_text).one()\n except MultipleResultsFound:\n self.project_logger.error('Duplicate records found for: %s', str(key))\n except NoResultFound:\n sequence = Sequence(sequence=sequence_text, lemmatized=lemmatized, has_function_words=has_function_words, all_function_words=all_function_words, length=length)\n sequence_dict[key] = sequence\n sentence.add_sequence(sequence=sequence, position=position, project=self.project, force=False)\n return sequences\n\n def get_sequence(self, sentence, i, j):\n \"\"\"Handle the main processing part in the process() loop. :param Sentence sentence: A sentence object to create sequences from. :param int i: The index to start the sequence from, inclusive. :param int j: The index to stop the sequence from, exclusive. :return list: A list of dicts representing sequences.\"\"\"\n sequences = []\n wordlist = sentence.words[i:j]\n lemmatized_phrase = join_tws(wordlist, ' ', 'lemma')\n surface_phrase = join_tws(wordlist, ' ', 'word')\n if surface_phrase in self.previously_indexed:\n return sequences\n wordlist_nostops = self.remove_stops(wordlist)\n lemmatized_phrase_nostops = join_tws(wordlist_nostops, ' ', LEMMA)\n surface_phrase_nostops = join_tws(wordlist_nostops, ' ', WORD)\n has_stops = len(wordlist_nostops) < len(wordlist)\n lemmatized_has_stops = len(lemmatized_phrase_nostops) < len(lemmatized_phrase)\n all_stop_words = len(wordlist_nostops) == 0\n lemmatized_all_stop_words = len(lemmatized_phrase_nostops) == 0\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase, 'is_lemmatized': False, 'has_function_words': has_stops, 'all_function_words': all_stop_words, 'words': wordlist})\n self.previously_indexed.append(surface_phrase)\n if has_stops and (not all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not surface_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': surface_phrase_nostops, 'is_lemmatized': False, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n self.previously_indexed.append(surface_phrase_nostops)\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase, 'is_lemmatized': True, 'has_function_words': lemmatized_has_stops, 'all_function_words': lemmatized_all_stop_words, 'words': wordlist})\n self.previously_indexed.append(lemmatized_phrase)\n if lemmatized_has_stops and (not lemmatized_all_stop_words) and (wordlist_nostops[0] == wordlist[0]) and (not lemmatized_phrase_nostops in self.previously_indexed):\n sequences.append({'start_position': i, 'sentence_id': sentence.id, 'document_id': sentence.document_id, 'sequence': lemmatized_phrase_nostops, 'is_lemmatized': True, 'has_function_words': False, 'all_function_words': False, 'words': wordlist_nostops})\n return sequences\n", "source": "the_stack_v2_python_sparse", "source_path": "app/preprocessor/sequenceprocessor.py", "source_repo": "xiaobaozi34/wordseer", "split": "test", "star_events_count": 0} {"blob_id": "69bceb54dec8816759c6f81b32d65701aa1f2707", "bodies": ["self.graph_distance = graph_distance\nself.dtype = object\nself.explicit_H = explicit_H\nself.use_chirality = use_chirality\nif isinstance(max_pair_distance, int) and max_pair_distance <= 0:\n raise ValueError('max_pair_distance must either be a positive integer or None')\nself.max_pair_distance = max_pair_distance\nif self.use_chirality:\n self.bt_len = int(GraphConvConstants.bond_fdim_base) + len(GraphConvConstants.possible_bond_stereo)\nelse:\n self.bt_len = int(GraphConvConstants.bond_fdim_base)", "idx_nodes = [(a.GetIdx(), atom_features(a, explicit_H=self.explicit_H, use_chirality=self.use_chirality)) for a in mol.GetAtoms()]\nidx_nodes.sort()\nidx, nodes = list(zip(*idx_nodes))\nnodes = np.vstack(nodes)\nbond_features_map = {}\nfor b in mol.GetBonds():\n bond_features_map[tuple(sorted([b.GetBeginAtomIdx(), b.GetEndAtomIdx()]))] = bond_features(b, use_chirality=self.use_chirality)\nbond_adj_list = [[] for mol_id in range(len(nodes))]\nfor bond in bond_features_map.keys():\n bond_adj_list[bond[0]].append(bond[1])\n bond_adj_list[bond[1]].append(bond[0])\npairs, pair_edges = pair_features(mol, bond_features_map, bond_adj_list, bt_len=self.bt_len, graph_distance=self.graph_distance, max_pair_distance=self.max_pair_distance)\nreturn WeaveMol(nodes, pairs, pair_edges)"], "bodies_text": "<|body_start_0|>\n self.graph_distance = graph_distance\n self.dtype = object\n self.explicit_H = explicit_H\n self.use_chirality = use_chirality\n if isinstance(max_pair_distance, int) and max_pair_distance <= 0:\n raise ValueError('max_pair_distance must either be a positive integer or None')\n self.max_pair_distance = max_pair_distance\n if self.use_chirality:\n self.bt_len = int(GraphConvConstants.bond_fdim_base) + len(GraphConvConstants.possible_bond_stereo)\n else:\n self.bt_len = int(GraphConvConstants.bond_fdim_base)\n<|end_body_0|>\n\n<|body_start_1|>\n idx_nodes = [(a.GetIdx(), atom_features(a, explicit_H=self.explicit_H, use_chirality=self.use_chirality)) for a in mol.GetAtoms()]\n idx_nodes.sort()\n idx, nodes = list(zip(*idx_nodes))\n nodes = np.vstack(nodes)\n bond_features_map = {}\n for b in mol.GetBonds():\n bond_features_map[tuple(sorted([b.GetBeginAtomIdx(), b.GetEndAtomIdx()]))] = bond_features(b, use_chirality=self.use_chirality)\n bond_adj_list = [[] for mol_id in range(len(nodes))]\n for bond in bond_features_map.keys():\n bond_adj_list[bond[0]].append(bond[1])\n bond_adj_list[bond[1]].append(bond[0])\n pairs, pair_edges = pair_features(mol, bond_features_map, bond_adj_list, bt_len=self.bt_len, graph_distance=self.graph_distance, max_pair_distance=self.max_pair_distance)\n return WeaveMol(nodes, pairs, pair_edges)\n<|end_body_1|>\n", "class_docstring": "This class implements the featurization to implement Weave convolutions. Weave convolutions were introduced in [1]_. Unlike Duvenaud graph convolutions, weave convolutions require a quadratic matrix of interaction descriptors for each pair of atoms. These extra descriptors may provide for additional descriptive power but at the cost of a larger featurized dataset. Examples -------- >>> import deepchem as dc >>> mols = [\"CCC\"] >>> featurizer = dc.feat.WeaveFeaturizer() >>> features = featurizer.featurize(mols) >>> type(features[0]) >>> features[0].get_num_atoms() # 3 atoms in compound 3 >>> features[0].get_num_features() # feature size 75 >>> type(f", "class_name": "WeaveFeaturizer", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WeaveFeaturizer:\n \"\"\"This class implements the featurization to implement Weave convolutions. Weave convolutions were introduced in [1]_. Unlike Duvenaud graph convolutions, weave convolutions require a quadratic matrix of interaction descriptors for each pair of atoms. These extra descriptors may provide for additional descriptive power but at the cost of a larger featurized dataset. Examples -------- >>> import deepchem as dc >>> mols = [\"CCC\"] >>> featurizer = dc.feat.WeaveFeaturizer() >>> features = featurizer.featurize(mols) >>> type(features[0]) >>> features[0].get_num_atoms() # 3 atoms in compound 3 >>> features[0].get_num_features() # feature size 75 >>> type(f\"\"\"\n\n def __init__(self, graph_distance: bool=True, explicit_H: bool=False, use_chirality: bool=False, max_pair_distance: Optional[int]=None):\n \"\"\"Initialize this featurizer with set parameters. Parameters ---------- graph_distance: bool, (default True) If True, use graph distance for distance features. Otherwise, use Euclidean distance. Note that this means that molecules that this featurizer is invoked on must have valid conformer information if this option is set. explicit_H: bool, (default False) If true, model hydrogens in the molecule. use_chirality: bool, (default False) If true, use chiral information in the featurization max_pair_distance: Optional[int], (default None) This value can be a positive integer or None. This parameter determines the maximum graph distance at which pair features are computed. For example, if `max_pai\"\"\"\n <|body_0|>\n\n def _featurize(self, mol):\n \"\"\"Encodes mol as a WeaveMol object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph_distance = graph_distance\n self.dtype = object\n self.explicit_H = explicit_H\n self.use_chirality = use_chirality\n if isinstance(max_pair_distance, int) and max_pair_distance <= 0:\n raise ValueError('max_pair_distance must either be a positive integer or None')\n self.max_pair_distance = max_pair_distance\n if self.use_chirality:\n self.bt_len = int(GraphConvConstants.bond_fdim_base) + len(GraphConvConstants.possible_bond_stereo)\n else:\n self.bt_len = int(GraphConvConstants.bond_fdim_base)\n<|end_body_0|>\n\n<|body_start_1|>\n idx_nodes = [(a.GetIdx(), atom_features(a, explicit_H=self.explicit_H, use_chirality=self.use_chirality)) for a in mol.GetAtoms()]\n idx_nodes.sort()\n idx, nodes = list(zip(*idx_nodes))\n nodes = np.vstack(nodes)\n bond_features_map = {}\n for b in mol.GetBonds():\n bond_features_map[tuple(sorted([b.GetBeginAtomIdx(), b.GetEndAtomIdx()]))] = bond_features(b, use_chirality=self.use_chirality)\n bond_adj_list = [[] for mol_id in range(len(nodes))]\n for bond in bond_features_map.keys():\n bond_adj_list[bond[0]].append(bond[1])\n bond_adj_list[bond[1]].append(bond[0])\n pairs, pair_edges = pair_features(mol, bond_features_map, bond_adj_list, bt_len=self.bt_len, graph_distance=self.graph_distance, max_pair_distance=self.max_pair_distance)\n return WeaveMol(nodes, pairs, pair_edges)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000424", "length_bytes": 38890, "license_type": "permissive", "methods": [{"docstring": "Initialize this featurizer with set parameters. Parameters ---------- graph_distance: bool, (default True) If True, use graph distance for distance features. Otherwise, use Euclidean distance. Note that this means that molecules that this featurizer is invoked on must have valid conformer information if this option is set. explicit_H: bool, (default False) If true, model hydrogens in the molecule. use_chirality: bool, (default False) If true, use chiral information in the featurization max_pair_distance: Optional[int], (default None) This value can be a positive integer or None. This parameter determines the maximum graph distance at which pair features are computed. For example, if `max_pai", "name": "__init__", "signature": "def __init__(self, graph_distance: bool=True, explicit_H: bool=False, use_chirality: bool=False, max_pair_distance: Optional[int]=None)"}, {"docstring": "Encodes mol as a WeaveMol object.", "name": "_featurize", "signature": "def _featurize(self, mol)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028785", "prompt": "Implement the Python class `WeaveFeaturizer` described below.\n\nClass description:\nThis class implements the featurization to implement Weave convolutions. Weave convolutions were introduced in [1]_. Unlike Duvenaud graph convolutions, weave convolutions require a quadratic matrix of interaction descriptors for each pair of atoms. These extra descriptors may provide for additional descriptive power but at the cost of a larger featurized dataset. Examples -------- >>> import deepchem as dc >>> mols = [\"CCC\"] >>> featurizer = dc.feat.WeaveFeaturizer() >>> features = featurizer.featurize(mols) >>> type(features[0]) >>> features[0].get_num_atoms() # 3 atoms in compound 3 >>> features[0].get_num_features() # feature size 75 >>> type(f\n\nMethod signatures and docstrings:\n- def __init__(self, graph_distance: bool=True, explicit_H: bool=False, use_chirality: bool=False, max_pair_distance: Optional[int]=None): Initialize this featurizer with set parameters. Parameters ---------- graph_distance: bool, (default True) If True, use graph distance for distance features. Otherwise, use Euclidean distance. Note that this means that molecules that this featurizer is invoked on must have valid conformer information if this option is set. explicit_H: bool, (default False) If true, model hydrogens in the molecule. use_chirality: bool, (default False) If true, use chiral information in the featurization max_pair_distance: Optional[int], (default None) This value can be a positive integer or None. This parameter determines the maximum graph distance at which pair features are computed. For example, if `max_pai\n- def _featurize(self, mol): Encodes mol as a WeaveMol object.", "prompted_full_text": "Implement the Python class `WeaveFeaturizer` described below.\n\nClass description:\nThis class implements the featurization to implement Weave convolutions. Weave convolutions were introduced in [1]_. Unlike Duvenaud graph convolutions, weave convolutions require a quadratic matrix of interaction descriptors for each pair of atoms. These extra descriptors may provide for additional descriptive power but at the cost of a larger featurized dataset. Examples -------- >>> import deepchem as dc >>> mols = [\"CCC\"] >>> featurizer = dc.feat.WeaveFeaturizer() >>> features = featurizer.featurize(mols) >>> type(features[0]) >>> features[0].get_num_atoms() # 3 atoms in compound 3 >>> features[0].get_num_features() # feature size 75 >>> type(f\n\nMethod signatures and docstrings:\n- def __init__(self, graph_distance: bool=True, explicit_H: bool=False, use_chirality: bool=False, max_pair_distance: Optional[int]=None): Initialize this featurizer with set parameters. Parameters ---------- graph_distance: bool, (default True) If True, use graph distance for distance features. Otherwise, use Euclidean distance. Note that this means that molecules that this featurizer is invoked on must have valid conformer information if this option is set. explicit_H: bool, (default False) If true, model hydrogens in the molecule. use_chirality: bool, (default False) If true, use chiral information in the featurization max_pair_distance: Optional[int], (default None) This value can be a positive integer or None. This parameter determines the maximum graph distance at which pair features are computed. For example, if `max_pai\n- def _featurize(self, mol): Encodes mol as a WeaveMol object.\n\n<|skeleton|>\nclass WeaveFeaturizer:\n \"\"\"This class implements the featurization to implement Weave convolutions. Weave convolutions were introduced in [1]_. Unlike Duvenaud graph convolutions, weave convolutions require a quadratic matrix of interaction descriptors for each pair of atoms. These extra descriptors may provide for additional descriptive power but at the cost of a larger featurized dataset. Examples -------- >>> import deepchem as dc >>> mols = [\"CCC\"] >>> featurizer = dc.feat.WeaveFeaturizer() >>> features = featurizer.featurize(mols) >>> type(features[0]) >>> features[0].get_num_atoms() # 3 atoms in compound 3 >>> features[0].get_num_features() # feature size 75 >>> type(f\"\"\"\n\n def __init__(self, graph_distance: bool=True, explicit_H: bool=False, use_chirality: bool=False, max_pair_distance: Optional[int]=None):\n \"\"\"Initialize this featurizer with set parameters. Parameters ---------- graph_distance: bool, (default True) If True, use graph distance for distance features. Otherwise, use Euclidean distance. Note that this means that molecules that this featurizer is invoked on must have valid conformer information if this option is set. explicit_H: bool, (default False) If true, model hydrogens in the molecule. use_chirality: bool, (default False) If true, use chiral information in the featurization max_pair_distance: Optional[int], (default None) This value can be a positive integer or None. This parameter determines the maximum graph distance at which pair features are computed. For example, if `max_pai\"\"\"\n <|body_0|>\n\n def _featurize(self, mol):\n \"\"\"Encodes mol as a WeaveMol object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.graph_distance = graph_distance\n self.dtype = object\n self.explicit_H = explicit_H\n self.use_chirality = use_chirality\n if isinstance(max_pair_distance, int) and max_pair_distance <= 0:\n raise ValueError('max_pair_distance must either be a positive integer or None')\n self.max_pair_distance = max_pair_distance\n if self.use_chirality:\n self.bt_len = int(GraphConvConstants.bond_fdim_base) + len(GraphConvConstants.possible_bond_stereo)\n else:\n self.bt_len = int(GraphConvConstants.bond_fdim_base)\n<|end_body_0|>\n\n<|body_start_1|>\n idx_nodes = [(a.GetIdx(), atom_features(a, explicit_H=self.explicit_H, use_chirality=self.use_chirality)) for a in mol.GetAtoms()]\n idx_nodes.sort()\n idx, nodes = list(zip(*idx_nodes))\n nodes = np.vstack(nodes)\n bond_features_map = {}\n for b in mol.GetBonds():\n bond_features_map[tuple(sorted([b.GetBeginAtomIdx(), b.GetEndAtomIdx()]))] = bond_features(b, use_chirality=self.use_chirality)\n bond_adj_list = [[] for mol_id in range(len(nodes))]\n for bond in bond_features_map.keys():\n bond_adj_list[bond[0]].append(bond[1])\n bond_adj_list[bond[1]].append(bond[0])\n pairs, pair_edges = pair_features(mol, bond_features_map, bond_adj_list, bt_len=self.bt_len, graph_distance=self.graph_distance, max_pair_distance=self.max_pair_distance)\n return WeaveMol(nodes, pairs, pair_edges)\n<|end_body_1|>\n", "revision_id": "ee6e67ebcf7bf04259cf13aff6388e2b791fea3d", "skeleton": "<|skeleton|>\nclass WeaveFeaturizer:\n \"\"\"This class implements the featurization to implement Weave convolutions. Weave convolutions were introduced in [1]_. Unlike Duvenaud graph convolutions, weave convolutions require a quadratic matrix of interaction descriptors for each pair of atoms. These extra descriptors may provide for additional descriptive power but at the cost of a larger featurized dataset. Examples -------- >>> import deepchem as dc >>> mols = [\"CCC\"] >>> featurizer = dc.feat.WeaveFeaturizer() >>> features = featurizer.featurize(mols) >>> type(features[0]) >>> features[0].get_num_atoms() # 3 atoms in compound 3 >>> features[0].get_num_features() # feature size 75 >>> type(f\"\"\"\n\n def __init__(self, graph_distance: bool=True, explicit_H: bool=False, use_chirality: bool=False, max_pair_distance: Optional[int]=None):\n \"\"\"Initialize this featurizer with set parameters. Parameters ---------- graph_distance: bool, (default True) If True, use graph distance for distance features. Otherwise, use Euclidean distance. Note that this means that molecules that this featurizer is invoked on must have valid conformer information if this option is set. explicit_H: bool, (default False) If true, model hydrogens in the molecule. use_chirality: bool, (default False) If true, use chiral information in the featurization max_pair_distance: Optional[int], (default None) This value can be a positive integer or None. This parameter determines the maximum graph distance at which pair features are computed. For example, if `max_pai\"\"\"\n <|body_0|>\n\n def _featurize(self, mol):\n \"\"\"Encodes mol as a WeaveMol object.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WeaveFeaturizer:\n \"\"\"This class implements the featurization to implement Weave convolutions. Weave convolutions were introduced in [1]_. Unlike Duvenaud graph convolutions, weave convolutions require a quadratic matrix of interaction descriptors for each pair of atoms. These extra descriptors may provide for additional descriptive power but at the cost of a larger featurized dataset. Examples -------- >>> import deepchem as dc >>> mols = [\"CCC\"] >>> featurizer = dc.feat.WeaveFeaturizer() >>> features = featurizer.featurize(mols) >>> type(features[0]) >>> features[0].get_num_atoms() # 3 atoms in compound 3 >>> features[0].get_num_features() # feature size 75 >>> type(f\"\"\"\n\n def __init__(self, graph_distance: bool=True, explicit_H: bool=False, use_chirality: bool=False, max_pair_distance: Optional[int]=None):\n \"\"\"Initialize this featurizer with set parameters. Parameters ---------- graph_distance: bool, (default True) If True, use graph distance for distance features. Otherwise, use Euclidean distance. Note that this means that molecules that this featurizer is invoked on must have valid conformer information if this option is set. explicit_H: bool, (default False) If true, model hydrogens in the molecule. use_chirality: bool, (default False) If true, use chiral information in the featurization max_pair_distance: Optional[int], (default None) This value can be a positive integer or None. This parameter determines the maximum graph distance at which pair features are computed. For example, if `max_pai\"\"\"\n self.graph_distance = graph_distance\n self.dtype = object\n self.explicit_H = explicit_H\n self.use_chirality = use_chirality\n if isinstance(max_pair_distance, int) and max_pair_distance <= 0:\n raise ValueError('max_pair_distance must either be a positive integer or None')\n self.max_pair_distance = max_pair_distance\n if self.use_chirality:\n self.bt_len = int(GraphConvConstants.bond_fdim_base) + len(GraphConvConstants.possible_bond_stereo)\n else:\n self.bt_len = int(GraphConvConstants.bond_fdim_base)\n\n def _featurize(self, mol):\n \"\"\"Encodes mol as a WeaveMol object.\"\"\"\n idx_nodes = [(a.GetIdx(), atom_features(a, explicit_H=self.explicit_H, use_chirality=self.use_chirality)) for a in mol.GetAtoms()]\n idx_nodes.sort()\n idx, nodes = list(zip(*idx_nodes))\n nodes = np.vstack(nodes)\n bond_features_map = {}\n for b in mol.GetBonds():\n bond_features_map[tuple(sorted([b.GetBeginAtomIdx(), b.GetEndAtomIdx()]))] = bond_features(b, use_chirality=self.use_chirality)\n bond_adj_list = [[] for mol_id in range(len(nodes))]\n for bond in bond_features_map.keys():\n bond_adj_list[bond[0]].append(bond[1])\n bond_adj_list[bond[1]].append(bond[0])\n pairs, pair_edges = pair_features(mol, bond_features_map, bond_adj_list, bt_len=self.bt_len, graph_distance=self.graph_distance, max_pair_distance=self.max_pair_distance)\n return WeaveMol(nodes, pairs, pair_edges)\n", "source": "the_stack_v2_python_sparse", "source_path": "deepchem/feat/graph_features.py", "source_repo": "deepchem/deepchem", "split": "test", "star_events_count": 4876} {"blob_id": "c4462a72d44fc2dc075521afff94e6a17cc56745", "bodies": ["url = '/dialrecords/missedcalls/'\ncalled_number = '008618664565400'\ncalling_number = '008618664565402'\ncalling_date = '2013-10-25T06:30:59Z'\ndata = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\nresponse = self.client.post(url, data)\nself.assertEqual(response.status_code, 201)\nurl = '/dialrecords/missedcalls/' + called_number + '/'\nresponse = self.client.get(url)\nrecord_list = json.loads(response.content)\nself.assertEqual(called_number, record_list[-1]['called_number'])\nself.assertEqual(calling_number, record_list[-1]['calling_number'])\nself.assertEqual(calling_date, record_list[-1]['calling_date'])", "times = 9\nurl = '/dialrecords/missedcalls/'\ncalled_number = '008618664565400'\ncalling_number = '008618664565402'\ncalling_date = '2013-10-25T06:30:59Z'\ndata = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\nfor i in range(0, times):\n self.client.post(url, data)\nurl = '/dialrecords/missedcalls/' + called_number + '/badge/'\nresponse = self.client.get(url)\nself.assertEqual(response.content, str(times))\nurl = '/dialrecords/missedcalls/' + called_number + '/'\nresponse = self.client.get(url)\nrecord_list = json.loads(response.content)\nself.assertEqual(len(record_list), times)", "url = '/dialrecords/missedcalls/'\ncalled_number = '008618664565400'\ncalling_number = '008618664565402'\ncalling_date = '2013-10-25T06:30:59Z'\ndata = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\nresponse = self.client.post(url, data)\nself.assertEqual(response.status_code, 201)\nurl = '/dialrecords/missedcalls/' + called_number + '/badge/'\nresponse = self.client.get(url)\nself.assertEqual(response.content, '1')\nurl = '/dialrecords/missedcalls/' + called_number + '/'\nself.client.delete(url)\nresponse = self.client.get(url)\nself.assertEqual(response.content, '[]')\nurl = '/dialrecords/missedcalls/' + called_number + '/badge/'\nresponse = self.client.get(url)\nself.assertEqual(response.content, '0')"], "bodies_text": "<|body_start_0|>\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(called_number, record_list[-1]['called_number'])\n self.assertEqual(calling_number, record_list[-1]['calling_number'])\n self.assertEqual(calling_date, record_list[-1]['calling_date'])\n<|end_body_0|>\n\n<|body_start_1|>\n times = 9\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n for i in range(0, times):\n self.client.post(url, data)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, str(times))\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(len(record_list), times)\n<|end_body_1|>\n\n<|body_start_2|>\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '1')\n url = '/dialrecords/missedcalls/' + called_number + '/'\n self.client.delete(url)\n response = self.client.get(url)\n self.assertEqual(response.content, '[]')\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '0')\n<|end_body_2|>\n", "class_docstring": "", "class_name": "RecordMissedCallsTest", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RecordMissedCallsTest:\n\n def test_record_a_new_missed_call(self):\n \"\"\"Record a new missed call. One new record should be inserted into DB.\"\"\"\n <|body_0|>\n\n def test_record_multiple_new_missed_call(self):\n \"\"\"Record multiple new missed calls. Multiple new record should be inserted into DB.\"\"\"\n <|body_1|>\n\n def test_delete_records(self):\n \"\"\"When user has fetched missed calls, they should delete them in DB. Test whether fetched records has been deleted.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(called_number, record_list[-1]['called_number'])\n self.assertEqual(calling_number, record_list[-1]['calling_number'])\n self.assertEqual(calling_date, record_list[-1]['calling_date'])\n<|end_body_0|>\n\n<|body_start_1|>\n times = 9\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n for i in range(0, times):\n self.client.post(url, data)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, str(times))\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(len(record_list), times)\n<|end_body_1|>\n\n<|body_start_2|>\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '1')\n url = '/dialrecords/missedcalls/' + called_number + '/'\n self.client.delete(url)\n response = self.client.get(url)\n self.assertEqual(response.content, '[]')\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '0')\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000425", "length_bytes": 3391, "license_type": "no_license", "methods": [{"docstring": "Record a new missed call. One new record should be inserted into DB.", "name": "test_record_a_new_missed_call", "signature": "def test_record_a_new_missed_call(self)"}, {"docstring": "Record multiple new missed calls. Multiple new record should be inserted into DB.", "name": "test_record_multiple_new_missed_call", "signature": "def test_record_multiple_new_missed_call(self)"}, {"docstring": "When user has fetched missed calls, they should delete them in DB. Test whether fetched records has been deleted.", "name": "test_delete_records", "signature": "def test_delete_records(self)"}], "n_methods": 3, "prompt": "Implement the Python class `RecordMissedCallsTest` described below.\n\nClass description:\nImplement the RecordMissedCallsTest class.\n\nMethod signatures and docstrings:\n- def test_record_a_new_missed_call(self): Record a new missed call. One new record should be inserted into DB.\n- def test_record_multiple_new_missed_call(self): Record multiple new missed calls. Multiple new record should be inserted into DB.\n- def test_delete_records(self): When user has fetched missed calls, they should delete them in DB. Test whether fetched records has been deleted.", "prompted_full_text": "Implement the Python class `RecordMissedCallsTest` described below.\n\nClass description:\nImplement the RecordMissedCallsTest class.\n\nMethod signatures and docstrings:\n- def test_record_a_new_missed_call(self): Record a new missed call. One new record should be inserted into DB.\n- def test_record_multiple_new_missed_call(self): Record multiple new missed calls. Multiple new record should be inserted into DB.\n- def test_delete_records(self): When user has fetched missed calls, they should delete them in DB. Test whether fetched records has been deleted.\n\n<|skeleton|>\nclass RecordMissedCallsTest:\n\n def test_record_a_new_missed_call(self):\n \"\"\"Record a new missed call. One new record should be inserted into DB.\"\"\"\n <|body_0|>\n\n def test_record_multiple_new_missed_call(self):\n \"\"\"Record multiple new missed calls. Multiple new record should be inserted into DB.\"\"\"\n <|body_1|>\n\n def test_delete_records(self):\n \"\"\"When user has fetched missed calls, they should delete them in DB. Test whether fetched records has been deleted.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(called_number, record_list[-1]['called_number'])\n self.assertEqual(calling_number, record_list[-1]['calling_number'])\n self.assertEqual(calling_date, record_list[-1]['calling_date'])\n<|end_body_0|>\n\n<|body_start_1|>\n times = 9\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n for i in range(0, times):\n self.client.post(url, data)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, str(times))\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(len(record_list), times)\n<|end_body_1|>\n\n<|body_start_2|>\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '1')\n url = '/dialrecords/missedcalls/' + called_number + '/'\n self.client.delete(url)\n response = self.client.get(url)\n self.assertEqual(response.content, '[]')\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '0')\n<|end_body_2|>\n", "revision_id": "95ace626e843061c1d4ee4c7b3ec8f2bd5e0021e", "skeleton": "<|skeleton|>\nclass RecordMissedCallsTest:\n\n def test_record_a_new_missed_call(self):\n \"\"\"Record a new missed call. One new record should be inserted into DB.\"\"\"\n <|body_0|>\n\n def test_record_multiple_new_missed_call(self):\n \"\"\"Record multiple new missed calls. Multiple new record should be inserted into DB.\"\"\"\n <|body_1|>\n\n def test_delete_records(self):\n \"\"\"When user has fetched missed calls, they should delete them in DB. Test whether fetched records has been deleted.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RecordMissedCallsTest:\n def test_record_a_new_missed_call(self):\n \"\"\"Record a new missed call. One new record should be inserted into DB.\"\"\"\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(called_number, record_list[-1]['called_number'])\n self.assertEqual(calling_number, record_list[-1]['calling_number'])\n self.assertEqual(calling_date, record_list[-1]['calling_date'])\n\n def test_record_multiple_new_missed_call(self):\n \"\"\"Record multiple new missed calls. Multiple new record should be inserted into DB.\"\"\"\n times = 9\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n for i in range(0, times):\n self.client.post(url, data)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, str(times))\n url = '/dialrecords/missedcalls/' + called_number + '/'\n response = self.client.get(url)\n record_list = json.loads(response.content)\n self.assertEqual(len(record_list), times)\n\n def test_delete_records(self):\n \"\"\"When user has fetched missed calls, they should delete them in DB. Test whether fetched records has been deleted.\"\"\"\n url = '/dialrecords/missedcalls/'\n called_number = '008618664565400'\n calling_number = '008618664565402'\n calling_date = '2013-10-25T06:30:59Z'\n data = {'called_number': called_number, 'calling_number': calling_number, 'calling_date': calling_date}\n response = self.client.post(url, data)\n self.assertEqual(response.status_code, 201)\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '1')\n url = '/dialrecords/missedcalls/' + called_number + '/'\n self.client.delete(url)\n response = self.client.get(url)\n self.assertEqual(response.content, '[]')\n url = '/dialrecords/missedcalls/' + called_number + '/badge/'\n response = self.client.get(url)\n self.assertEqual(response.content, '0')\n", "source": "the_stack_v2_python_sparse", "source_path": "dialrecords/tests.py", "source_repo": "Zhe-Zhu/Qianli-server", "split": "test", "star_events_count": 0} {"blob_id": "6bc4d93ab82b5b45e0edaab7902b040b5c561a9b", "bodies": ["l = open(list_file).readlines()\nl = [i.split() for i in l]\nl = [(i[0], i[2]) for i in l]\nd = {}\nfor matrix_file, name in l:\n filename = '%s.pfm' % (matrix_file,)\n matrix_file = os.path.join(matrix_dir, filename)\n d[name] = PWM(self.load_matrix(matrix_file))\nreturn d", "f = open(matrix_file)\nl = f.readlines()\nl = [i.split() for i in l if len(i.strip())]\nl = [map(float, i) for i in l]\nassert len(l) == 4\nlens = map(len, l)\nassert min(lens) == max(lens)\nsite_len = min(lens)\nmat = []\nfor i in range(0, site_len):\n A = l[0][i]\n C = l[1][i]\n G = l[2][i]\n T = l[3][i]\n N = 0\n mat.append((A, C, G, T, N))\nreturn mat"], "bodies_text": "<|body_start_0|>\n l = open(list_file).readlines()\n l = [i.split() for i in l]\n l = [(i[0], i[2]) for i in l]\n d = {}\n for matrix_file, name in l:\n filename = '%s.pfm' % (matrix_file,)\n matrix_file = os.path.join(matrix_dir, filename)\n d[name] = PWM(self.load_matrix(matrix_file))\n return d\n<|end_body_0|>\n\n<|body_start_1|>\n f = open(matrix_file)\n l = f.readlines()\n l = [i.split() for i in l if len(i.strip())]\n l = [map(float, i) for i in l]\n assert len(l) == 4\n lens = map(len, l)\n assert min(lens) == max(lens)\n site_len = min(lens)\n mat = []\n for i in range(0, site_len):\n A = l[0][i]\n C = l[1][i]\n G = l[2][i]\n T = l[3][i]\n N = 0\n mat.append((A, C, G, T, N))\n return mat\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Jaspar", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Jaspar:\n\n def load(self, list_file, matrix_dir):\n \"\"\"Load all of the matrices in list_file from matrix_dir. Return a dictionary of PWM objects.\"\"\"\n <|body_0|>\n\n def load_matrix(self, matrix_file):\n \"\"\"Load an individual matrix into a motility-compatible format.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = open(list_file).readlines()\n l = [i.split() for i in l]\n l = [(i[0], i[2]) for i in l]\n d = {}\n for matrix_file, name in l:\n filename = '%s.pfm' % (matrix_file,)\n matrix_file = os.path.join(matrix_dir, filename)\n d[name] = PWM(self.load_matrix(matrix_file))\n return d\n<|end_body_0|>\n\n<|body_start_1|>\n f = open(matrix_file)\n l = f.readlines()\n l = [i.split() for i in l if len(i.strip())]\n l = [map(float, i) for i in l]\n assert len(l) == 4\n lens = map(len, l)\n assert min(lens) == max(lens)\n site_len = min(lens)\n mat = []\n for i in range(0, site_len):\n A = l[0][i]\n C = l[1][i]\n G = l[2][i]\n T = l[3][i]\n N = 0\n mat.append((A, C, G, T, N))\n return mat\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000426", "length_bytes": 1568, "license_type": "no_license", "methods": [{"docstring": "Load all of the matrices in list_file from matrix_dir. Return a dictionary of PWM objects.", "name": "load", "signature": "def load(self, list_file, matrix_dir)"}, {"docstring": "Load an individual matrix into a motility-compatible format.", "name": "load_matrix", "signature": "def load_matrix(self, matrix_file)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_035382", "prompt": "Implement the Python class `Jaspar` described below.\n\nClass description:\nImplement the Jaspar class.\n\nMethod signatures and docstrings:\n- def load(self, list_file, matrix_dir): Load all of the matrices in list_file from matrix_dir. Return a dictionary of PWM objects.\n- def load_matrix(self, matrix_file): Load an individual matrix into a motility-compatible format.", "prompted_full_text": "Implement the Python class `Jaspar` described below.\n\nClass description:\nImplement the Jaspar class.\n\nMethod signatures and docstrings:\n- def load(self, list_file, matrix_dir): Load all of the matrices in list_file from matrix_dir. Return a dictionary of PWM objects.\n- def load_matrix(self, matrix_file): Load an individual matrix into a motility-compatible format.\n\n<|skeleton|>\nclass Jaspar:\n\n def load(self, list_file, matrix_dir):\n \"\"\"Load all of the matrices in list_file from matrix_dir. Return a dictionary of PWM objects.\"\"\"\n <|body_0|>\n\n def load_matrix(self, matrix_file):\n \"\"\"Load an individual matrix into a motility-compatible format.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n l = open(list_file).readlines()\n l = [i.split() for i in l]\n l = [(i[0], i[2]) for i in l]\n d = {}\n for matrix_file, name in l:\n filename = '%s.pfm' % (matrix_file,)\n matrix_file = os.path.join(matrix_dir, filename)\n d[name] = PWM(self.load_matrix(matrix_file))\n return d\n<|end_body_0|>\n\n<|body_start_1|>\n f = open(matrix_file)\n l = f.readlines()\n l = [i.split() for i in l if len(i.strip())]\n l = [map(float, i) for i in l]\n assert len(l) == 4\n lens = map(len, l)\n assert min(lens) == max(lens)\n site_len = min(lens)\n mat = []\n for i in range(0, site_len):\n A = l[0][i]\n C = l[1][i]\n G = l[2][i]\n T = l[3][i]\n N = 0\n mat.append((A, C, G, T, N))\n return mat\n<|end_body_1|>\n", "revision_id": "7b3f4da9cdefd7680f07b707339aee59faece1d2", "skeleton": "<|skeleton|>\nclass Jaspar:\n\n def load(self, list_file, matrix_dir):\n \"\"\"Load all of the matrices in list_file from matrix_dir. Return a dictionary of PWM objects.\"\"\"\n <|body_0|>\n\n def load_matrix(self, matrix_file):\n \"\"\"Load an individual matrix into a motility-compatible format.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Jaspar:\n def load(self, list_file, matrix_dir):\n \"\"\"Load all of the matrices in list_file from matrix_dir. Return a dictionary of PWM objects.\"\"\"\n l = open(list_file).readlines()\n l = [i.split() for i in l]\n l = [(i[0], i[2]) for i in l]\n d = {}\n for matrix_file, name in l:\n filename = '%s.pfm' % (matrix_file,)\n matrix_file = os.path.join(matrix_dir, filename)\n d[name] = PWM(self.load_matrix(matrix_file))\n return d\n\n def load_matrix(self, matrix_file):\n \"\"\"Load an individual matrix into a motility-compatible format.\"\"\"\n f = open(matrix_file)\n l = f.readlines()\n l = [i.split() for i in l if len(i.strip())]\n l = [map(float, i) for i in l]\n assert len(l) == 4\n lens = map(len, l)\n assert min(lens) == max(lens)\n site_len = min(lens)\n mat = []\n for i in range(0, site_len):\n A = l[0][i]\n C = l[1][i]\n G = l[2][i]\n T = l[3][i]\n N = 0\n mat.append((A, C, G, T, N))\n return mat\n", "source": "the_stack_v2_python_sparse", "source_path": "TranscriptionFactors/JASPAR.py", "source_repo": "neilrobertson/BICRCode", "split": "test", "star_events_count": 0} {"blob_id": "3e39b500c0c9cf57bbdc94bcea97dd328d191023", "bodies": ["super().__init__(*args, **kwargs)\nself._registry.update(admin.site._registry)\nself._actions = {}\nself._global_actions = {}", "urls = admin.site.get_urls()\nextra_urls = [path('create-user/', depot_views.create_user, name='admin-user-creation'), path('create-permissions-contenttypes/', depot_views.create_permissions_content_types, name='temp-permissions-contenttypes'), path('favicon.ico', RedirectView.as_view(url='/static/images/favicon.ico'))]\nreturn urls + extra_urls"], "bodies_text": "<|body_start_0|>\n super().__init__(*args, **kwargs)\n self._registry.update(admin.site._registry)\n self._actions = {}\n self._global_actions = {}\n<|end_body_0|>\n\n<|body_start_1|>\n urls = admin.site.get_urls()\n extra_urls = [path('create-user/', depot_views.create_user, name='admin-user-creation'), path('create-permissions-contenttypes/', depot_views.create_permissions_content_types, name='temp-permissions-contenttypes'), path('favicon.ico', RedirectView.as_view(url='/static/images/favicon.ico'))]\n return urls + extra_urls\n<|end_body_1|>\n", "class_docstring": "Depot Admin Site Class.", "class_name": "DepotAdminSite", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DepotAdminSite:\n \"\"\"Depot Admin Site Class.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize admin site.\"\"\"\n <|body_0|>\n\n def get_urls(self):\n \"\"\"Return admin urls with extra urls.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self._registry.update(admin.site._registry)\n self._actions = {}\n self._global_actions = {}\n<|end_body_0|>\n\n<|body_start_1|>\n urls = admin.site.get_urls()\n extra_urls = [path('create-user/', depot_views.create_user, name='admin-user-creation'), path('create-permissions-contenttypes/', depot_views.create_permissions_content_types, name='temp-permissions-contenttypes'), path('favicon.ico', RedirectView.as_view(url='/static/images/favicon.ico'))]\n return urls + extra_urls\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000427", "length_bytes": 1128, "license_type": "no_license", "methods": [{"docstring": "Initialize admin site.", "name": "__init__", "signature": "def __init__(self, *args, **kwargs)"}, {"docstring": "Return admin urls with extra urls.", "name": "get_urls", "signature": "def get_urls(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049735", "prompt": "Implement the Python class `DepotAdminSite` described below.\n\nClass description:\nDepot Admin Site Class.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize admin site.\n- def get_urls(self): Return admin urls with extra urls.", "prompted_full_text": "Implement the Python class `DepotAdminSite` described below.\n\nClass description:\nDepot Admin Site Class.\n\nMethod signatures and docstrings:\n- def __init__(self, *args, **kwargs): Initialize admin site.\n- def get_urls(self): Return admin urls with extra urls.\n\n<|skeleton|>\nclass DepotAdminSite:\n \"\"\"Depot Admin Site Class.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize admin site.\"\"\"\n <|body_0|>\n\n def get_urls(self):\n \"\"\"Return admin urls with extra urls.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(*args, **kwargs)\n self._registry.update(admin.site._registry)\n self._actions = {}\n self._global_actions = {}\n<|end_body_0|>\n\n<|body_start_1|>\n urls = admin.site.get_urls()\n extra_urls = [path('create-user/', depot_views.create_user, name='admin-user-creation'), path('create-permissions-contenttypes/', depot_views.create_permissions_content_types, name='temp-permissions-contenttypes'), path('favicon.ico', RedirectView.as_view(url='/static/images/favicon.ico'))]\n return urls + extra_urls\n<|end_body_1|>\n", "revision_id": "26ca47c726f2c38211247a41d294e38a67cecb7f", "skeleton": "<|skeleton|>\nclass DepotAdminSite:\n \"\"\"Depot Admin Site Class.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize admin site.\"\"\"\n <|body_0|>\n\n def get_urls(self):\n \"\"\"Return admin urls with extra urls.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DepotAdminSite:\n \"\"\"Depot Admin Site Class.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize admin site.\"\"\"\n super().__init__(*args, **kwargs)\n self._registry.update(admin.site._registry)\n self._actions = {}\n self._global_actions = {}\n\n def get_urls(self):\n \"\"\"Return admin urls with extra urls.\"\"\"\n urls = admin.site.get_urls()\n extra_urls = [path('create-user/', depot_views.create_user, name='admin-user-creation'), path('create-permissions-contenttypes/', depot_views.create_permissions_content_types, name='temp-permissions-contenttypes'), path('favicon.ico', RedirectView.as_view(url='/static/images/favicon.ico'))]\n return urls + extra_urls\n", "source": "the_stack_v2_python_sparse", "source_path": "depot/depot_proj/admin.py", "source_repo": "rsenwar/depot", "split": "test", "star_events_count": 0} {"blob_id": "c7c6f1c1aa22369a7bc9f813e06569e18424d96a", "bodies": ["logging.info('========== enter_output_list ==========')\nself.driver.find_element_by_xpath(self.delivery_list).click()\nsleep(1)", "logging.info('========== filter_order ==========')\nself.driver.find_element_by_id(self.status_filter).click()\nsleep(0.5)\nself.driver.find_elements_by_class_name(self.check_label)[1].click()\nsleep(0.5)\nif not self.driver.find_elements_by_class_name(self.order_list):\n i = 1\n while True:\n self.driver.find_element_by_id(self.date_filter).click()\n sleep(0.5)\n conditions = self.driver.find_elements_by_class_name(self.check_label)\n conditions[i].click()\n sleep(0.5)\n if self.driver.find_elements_by_class_name(self.order_list):\n return self.driver.find_elements_by_class_name(self.order_list)\n i += 1\n if i == len(conditions):\n break\nreturn self.driver.find_elements_by_class_name(self.order_list)", "logging.info('========== single_output ==========')\norder_list = self.filter_order()\nif order_list:\n order_list[0].click()\n sleep(0.5)\n self.driver.find_element_by_id(self.stock_out).click()\n sleep(0.5)\n self.driver.find_element_by_xpath(self.confirm).click()\nelse:\n logging.info('没有待出库的订单')", "logging.info('========== batch_outbound ==========')\norder_list = self.filter_order()\nif order_list:\n self.driver.find_element_by_id(self.batch).click()\n sleep(1)\n self.driver.find_element_by_xpath(self.batch_confirm).click()\n sleep(5)\nelse:\n logging.info('没有待出库的订单')"], "bodies_text": "<|body_start_0|>\n logging.info('========== enter_output_list ==========')\n self.driver.find_element_by_xpath(self.delivery_list).click()\n sleep(1)\n<|end_body_0|>\n\n<|body_start_1|>\n logging.info('========== filter_order ==========')\n self.driver.find_element_by_id(self.status_filter).click()\n sleep(0.5)\n self.driver.find_elements_by_class_name(self.check_label)[1].click()\n sleep(0.5)\n if not self.driver.find_elements_by_class_name(self.order_list):\n i = 1\n while True:\n self.driver.find_element_by_id(self.date_filter).click()\n sleep(0.5)\n conditions = self.driver.find_elements_by_class_name(self.check_label)\n conditions[i].click()\n sleep(0.5)\n if self.driver.find_elements_by_class_name(self.order_list):\n return self.driver.find_elements_by_class_name(self.order_list)\n i += 1\n if i == len(conditions):\n break\n return self.driver.find_elements_by_class_name(self.order_list)\n<|end_body_1|>\n\n<|body_start_2|>\n logging.info('========== single_output ==========')\n order_list = self.filter_order()\n if order_list:\n order_list[0].click()\n sleep(0.5)\n self.driver.find_element_by_id(self.stock_out).click()\n sleep(0.5)\n self.driver.find_element_by_xpath(self.confirm).click()\n else:\n logging.info('没有待出库的订单')\n<|end_body_2|>\n\n<|body_start_3|>\n logging.info('========== batch_outbound ==========')\n order_list = self.filter_order()\n if order_list:\n self.driver.find_element_by_id(self.batch).click()\n sleep(1)\n self.driver.find_element_by_xpath(self.batch_confirm).click()\n sleep(5)\n else:\n logging.info('没有待出库的订单')\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Output", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Output:\n\n def enter_output_list(self):\n \"\"\"进入出库单列表\"\"\"\n <|body_0|>\n\n def filter_order(self):\n \"\"\"筛选待出库的订单\"\"\"\n <|body_1|>\n\n def single_output(self):\n \"\"\"APP单个出库\"\"\"\n <|body_2|>\n\n def batch_output(self):\n \"\"\"APP批量出库\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logging.info('========== enter_output_list ==========')\n self.driver.find_element_by_xpath(self.delivery_list).click()\n sleep(1)\n<|end_body_0|>\n\n<|body_start_1|>\n logging.info('========== filter_order ==========')\n self.driver.find_element_by_id(self.status_filter).click()\n sleep(0.5)\n self.driver.find_elements_by_class_name(self.check_label)[1].click()\n sleep(0.5)\n if not self.driver.find_elements_by_class_name(self.order_list):\n i = 1\n while True:\n self.driver.find_element_by_id(self.date_filter).click()\n sleep(0.5)\n conditions = self.driver.find_elements_by_class_name(self.check_label)\n conditions[i].click()\n sleep(0.5)\n if self.driver.find_elements_by_class_name(self.order_list):\n return self.driver.find_elements_by_class_name(self.order_list)\n i += 1\n if i == len(conditions):\n break\n return self.driver.find_elements_by_class_name(self.order_list)\n<|end_body_1|>\n\n<|body_start_2|>\n logging.info('========== single_output ==========')\n order_list = self.filter_order()\n if order_list:\n order_list[0].click()\n sleep(0.5)\n self.driver.find_element_by_id(self.stock_out).click()\n sleep(0.5)\n self.driver.find_element_by_xpath(self.confirm).click()\n else:\n logging.info('没有待出库的订单')\n<|end_body_2|>\n\n<|body_start_3|>\n logging.info('========== batch_outbound ==========')\n order_list = self.filter_order()\n if order_list:\n self.driver.find_element_by_id(self.batch).click()\n sleep(1)\n self.driver.find_element_by_xpath(self.batch_confirm).click()\n sleep(5)\n else:\n logging.info('没有待出库的订单')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000428", "length_bytes": 3560, "license_type": "no_license", "methods": [{"docstring": "进入出库单列表", "name": "enter_output_list", "signature": "def enter_output_list(self)"}, {"docstring": "筛选待出库的订单", "name": "filter_order", "signature": "def filter_order(self)"}, {"docstring": "APP单个出库", "name": "single_output", "signature": "def single_output(self)"}, {"docstring": "APP批量出库", "name": "batch_output", "signature": "def batch_output(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_test_002783", "prompt": "Implement the Python class `Output` described below.\n\nClass description:\nImplement the Output class.\n\nMethod signatures and docstrings:\n- def enter_output_list(self): 进入出库单列表\n- def filter_order(self): 筛选待出库的订单\n- def single_output(self): APP单个出库\n- def batch_output(self): APP批量出库", "prompted_full_text": "Implement the Python class `Output` described below.\n\nClass description:\nImplement the Output class.\n\nMethod signatures and docstrings:\n- def enter_output_list(self): 进入出库单列表\n- def filter_order(self): 筛选待出库的订单\n- def single_output(self): APP单个出库\n- def batch_output(self): APP批量出库\n\n<|skeleton|>\nclass Output:\n\n def enter_output_list(self):\n \"\"\"进入出库单列表\"\"\"\n <|body_0|>\n\n def filter_order(self):\n \"\"\"筛选待出库的订单\"\"\"\n <|body_1|>\n\n def single_output(self):\n \"\"\"APP单个出库\"\"\"\n <|body_2|>\n\n def batch_output(self):\n \"\"\"APP批量出库\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n logging.info('========== enter_output_list ==========')\n self.driver.find_element_by_xpath(self.delivery_list).click()\n sleep(1)\n<|end_body_0|>\n\n<|body_start_1|>\n logging.info('========== filter_order ==========')\n self.driver.find_element_by_id(self.status_filter).click()\n sleep(0.5)\n self.driver.find_elements_by_class_name(self.check_label)[1].click()\n sleep(0.5)\n if not self.driver.find_elements_by_class_name(self.order_list):\n i = 1\n while True:\n self.driver.find_element_by_id(self.date_filter).click()\n sleep(0.5)\n conditions = self.driver.find_elements_by_class_name(self.check_label)\n conditions[i].click()\n sleep(0.5)\n if self.driver.find_elements_by_class_name(self.order_list):\n return self.driver.find_elements_by_class_name(self.order_list)\n i += 1\n if i == len(conditions):\n break\n return self.driver.find_elements_by_class_name(self.order_list)\n<|end_body_1|>\n\n<|body_start_2|>\n logging.info('========== single_output ==========')\n order_list = self.filter_order()\n if order_list:\n order_list[0].click()\n sleep(0.5)\n self.driver.find_element_by_id(self.stock_out).click()\n sleep(0.5)\n self.driver.find_element_by_xpath(self.confirm).click()\n else:\n logging.info('没有待出库的订单')\n<|end_body_2|>\n\n<|body_start_3|>\n logging.info('========== batch_outbound ==========')\n order_list = self.filter_order()\n if order_list:\n self.driver.find_element_by_id(self.batch).click()\n sleep(1)\n self.driver.find_element_by_xpath(self.batch_confirm).click()\n sleep(5)\n else:\n logging.info('没有待出库的订单')\n<|end_body_3|>\n", "revision_id": "05a0430f134c59be968daa6fa60d72aa5a9fbc5e", "skeleton": "<|skeleton|>\nclass Output:\n\n def enter_output_list(self):\n \"\"\"进入出库单列表\"\"\"\n <|body_0|>\n\n def filter_order(self):\n \"\"\"筛选待出库的订单\"\"\"\n <|body_1|>\n\n def single_output(self):\n \"\"\"APP单个出库\"\"\"\n <|body_2|>\n\n def batch_output(self):\n \"\"\"APP批量出库\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Output:\n def enter_output_list(self):\n \"\"\"进入出库单列表\"\"\"\n logging.info('========== enter_output_list ==========')\n self.driver.find_element_by_xpath(self.delivery_list).click()\n sleep(1)\n\n def filter_order(self):\n \"\"\"筛选待出库的订单\"\"\"\n logging.info('========== filter_order ==========')\n self.driver.find_element_by_id(self.status_filter).click()\n sleep(0.5)\n self.driver.find_elements_by_class_name(self.check_label)[1].click()\n sleep(0.5)\n if not self.driver.find_elements_by_class_name(self.order_list):\n i = 1\n while True:\n self.driver.find_element_by_id(self.date_filter).click()\n sleep(0.5)\n conditions = self.driver.find_elements_by_class_name(self.check_label)\n conditions[i].click()\n sleep(0.5)\n if self.driver.find_elements_by_class_name(self.order_list):\n return self.driver.find_elements_by_class_name(self.order_list)\n i += 1\n if i == len(conditions):\n break\n return self.driver.find_elements_by_class_name(self.order_list)\n\n def single_output(self):\n \"\"\"APP单个出库\"\"\"\n logging.info('========== single_output ==========')\n order_list = self.filter_order()\n if order_list:\n order_list[0].click()\n sleep(0.5)\n self.driver.find_element_by_id(self.stock_out).click()\n sleep(0.5)\n self.driver.find_element_by_xpath(self.confirm).click()\n else:\n logging.info('没有待出库的订单')\n\n def batch_output(self):\n \"\"\"APP批量出库\"\"\"\n logging.info('========== batch_outbound ==========')\n order_list = self.filter_order()\n if order_list:\n self.driver.find_element_by_id(self.batch).click()\n sleep(1)\n self.driver.find_element_by_xpath(self.batch_confirm).click()\n sleep(5)\n else:\n logging.info('没有待出库的订单')\n", "source": "the_stack_v2_python_sparse", "source_path": "ccloud/businessView/output.py", "source_repo": "dopqob/Python", "split": "test", "star_events_count": 0} {"blob_id": "6f9cfaca5979ec78138348407f71106617c4e796", "bodies": ["form = super().get_form(*args, **kwargs)\nform.fields['when'].widget.widgets[0].attrs = {'placeholder': f'Start Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\nform.fields['when'].widget.widgets[1].attrs = {'placeholder': f'End Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\nif hasattr(form.fields, 'event_location'):\n form.fields['event_location'].queryset = EventLocation.objects.filter(camp=self.camp)\nreturn form", "context = super().get_context_data(*args, **kwargs)\nif not hasattr(self, 'event_type'):\n self.event_type = self.get_object().event_type\ncontext['event_type'] = self.event_type\nif not hasattr(self, 'event_location'):\n self.event_location = self.get_object().event_location\ncontext['event_location'] = self.event_location\ncontext['sessions'] = self.event_type.event_sessions.filter(camp=self.camp)\nreturn context"], "bodies_text": "<|body_start_0|>\n form = super().get_form(*args, **kwargs)\n form.fields['when'].widget.widgets[0].attrs = {'placeholder': f'Start Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n form.fields['when'].widget.widgets[1].attrs = {'placeholder': f'End Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n if hasattr(form.fields, 'event_location'):\n form.fields['event_location'].queryset = EventLocation.objects.filter(camp=self.camp)\n return form\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(*args, **kwargs)\n if not hasattr(self, 'event_type'):\n self.event_type = self.get_object().event_type\n context['event_type'] = self.event_type\n if not hasattr(self, 'event_location'):\n self.event_location = self.get_object().event_location\n context['event_location'] = self.event_location\n context['sessions'] = self.event_type.event_sessions.filter(camp=self.camp)\n return context\n<|end_body_1|>\n", "class_docstring": "A mixin with the stuff shared between EventSession{Create|Update}View", "class_name": "EventSessionFormViewMixin", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EventSessionFormViewMixin:\n \"\"\"A mixin with the stuff shared between EventSession{Create|Update}View\"\"\"\n\n def get_form(self, *args, **kwargs):\n \"\"\"The default range widgets are a bit shit because they eat the help_text and have no indication of which field is for what. So we add a nice placeholder. We also limit the event_location dropdown to only the current camps locations.\"\"\"\n <|body_0|>\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Add event_type and location and existing sessions to context\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form = super().get_form(*args, **kwargs)\n form.fields['when'].widget.widgets[0].attrs = {'placeholder': f'Start Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n form.fields['when'].widget.widgets[1].attrs = {'placeholder': f'End Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n if hasattr(form.fields, 'event_location'):\n form.fields['event_location'].queryset = EventLocation.objects.filter(camp=self.camp)\n return form\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(*args, **kwargs)\n if not hasattr(self, 'event_type'):\n self.event_type = self.get_object().event_type\n context['event_type'] = self.event_type\n if not hasattr(self, 'event_location'):\n self.event_location = self.get_object().event_location\n context['event_location'] = self.event_location\n context['sessions'] = self.event_type.event_sessions.filter(camp=self.camp)\n return context\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000429", "length_bytes": 33145, "license_type": "permissive", "methods": [{"docstring": "The default range widgets are a bit shit because they eat the help_text and have no indication of which field is for what. So we add a nice placeholder. We also limit the event_location dropdown to only the current camps locations.", "name": "get_form", "signature": "def get_form(self, *args, **kwargs)"}, {"docstring": "Add event_type and location and existing sessions to context", "name": "get_context_data", "signature": "def get_context_data(self, *args, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_021417", "prompt": "Implement the Python class `EventSessionFormViewMixin` described below.\n\nClass description:\nA mixin with the stuff shared between EventSession{Create|Update}View\n\nMethod signatures and docstrings:\n- def get_form(self, *args, **kwargs): The default range widgets are a bit shit because they eat the help_text and have no indication of which field is for what. So we add a nice placeholder. We also limit the event_location dropdown to only the current camps locations.\n- def get_context_data(self, *args, **kwargs): Add event_type and location and existing sessions to context", "prompted_full_text": "Implement the Python class `EventSessionFormViewMixin` described below.\n\nClass description:\nA mixin with the stuff shared between EventSession{Create|Update}View\n\nMethod signatures and docstrings:\n- def get_form(self, *args, **kwargs): The default range widgets are a bit shit because they eat the help_text and have no indication of which field is for what. So we add a nice placeholder. We also limit the event_location dropdown to only the current camps locations.\n- def get_context_data(self, *args, **kwargs): Add event_type and location and existing sessions to context\n\n<|skeleton|>\nclass EventSessionFormViewMixin:\n \"\"\"A mixin with the stuff shared between EventSession{Create|Update}View\"\"\"\n\n def get_form(self, *args, **kwargs):\n \"\"\"The default range widgets are a bit shit because they eat the help_text and have no indication of which field is for what. So we add a nice placeholder. We also limit the event_location dropdown to only the current camps locations.\"\"\"\n <|body_0|>\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Add event_type and location and existing sessions to context\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n form = super().get_form(*args, **kwargs)\n form.fields['when'].widget.widgets[0].attrs = {'placeholder': f'Start Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n form.fields['when'].widget.widgets[1].attrs = {'placeholder': f'End Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n if hasattr(form.fields, 'event_location'):\n form.fields['event_location'].queryset = EventLocation.objects.filter(camp=self.camp)\n return form\n<|end_body_0|>\n\n<|body_start_1|>\n context = super().get_context_data(*args, **kwargs)\n if not hasattr(self, 'event_type'):\n self.event_type = self.get_object().event_type\n context['event_type'] = self.event_type\n if not hasattr(self, 'event_location'):\n self.event_location = self.get_object().event_location\n context['event_location'] = self.event_location\n context['sessions'] = self.event_type.event_sessions.filter(camp=self.camp)\n return context\n<|end_body_1|>\n", "revision_id": "767deb7f58429e9162e0c2ef79be9f0f38f37ce1", "skeleton": "<|skeleton|>\nclass EventSessionFormViewMixin:\n \"\"\"A mixin with the stuff shared between EventSession{Create|Update}View\"\"\"\n\n def get_form(self, *args, **kwargs):\n \"\"\"The default range widgets are a bit shit because they eat the help_text and have no indication of which field is for what. So we add a nice placeholder. We also limit the event_location dropdown to only the current camps locations.\"\"\"\n <|body_0|>\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Add event_type and location and existing sessions to context\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EventSessionFormViewMixin:\n \"\"\"A mixin with the stuff shared between EventSession{Create|Update}View\"\"\"\n\n def get_form(self, *args, **kwargs):\n \"\"\"The default range widgets are a bit shit because they eat the help_text and have no indication of which field is for what. So we add a nice placeholder. We also limit the event_location dropdown to only the current camps locations.\"\"\"\n form = super().get_form(*args, **kwargs)\n form.fields['when'].widget.widgets[0].attrs = {'placeholder': f'Start Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n form.fields['when'].widget.widgets[1].attrs = {'placeholder': f'End Date and Time (YYYY-MM-DD HH:MM). Time zone is {settings.TIME_ZONE}.'}\n if hasattr(form.fields, 'event_location'):\n form.fields['event_location'].queryset = EventLocation.objects.filter(camp=self.camp)\n return form\n\n def get_context_data(self, *args, **kwargs):\n \"\"\"Add event_type and location and existing sessions to context\"\"\"\n context = super().get_context_data(*args, **kwargs)\n if not hasattr(self, 'event_type'):\n self.event_type = self.get_object().event_type\n context['event_type'] = self.event_type\n if not hasattr(self, 'event_location'):\n self.event_location = self.get_object().event_location\n context['event_location'] = self.event_location\n context['sessions'] = self.event_type.event_sessions.filter(camp=self.camp)\n return context\n", "source": "the_stack_v2_python_sparse", "source_path": "src/backoffice/views/program.py", "source_repo": "bornhack/bornhack-website", "split": "test", "star_events_count": 9} {"blob_id": "c1b088ab9fb2e11bb3f95198d8f112d2be020e8d", "bodies": ["super().__init__(classifier=classifier, regressor=regressor, coder=coder)\nself.logger = None\nself.fg_bg_sampler = sampler", "box_logits, box_deltas = (prediction['box_logits'], prediction['box_deltas'])\nlosses = {}\nsampled_pos_inds, sampled_neg_inds = self.select_indices(target_labels, box_logits)\nsampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\ntarget_labels = torch.cat(target_labels, dim=0)\nwith torch.no_grad():\n batch_matched_gt_boxes = torch.cat(matched_gt_boxes, dim=0)\n batch_anchors = torch.cat(anchors, dim=0)\n target_deltas_sampled = self.coder.encode_single(batch_matched_gt_boxes[sampled_pos_inds], batch_anchors[sampled_pos_inds])\nif sampled_pos_inds.numel() > 0:\n losses['reg'] = self.regressor.compute_loss(box_deltas[sampled_pos_inds], target_deltas_sampled) / max(1, sampled_pos_inds.numel())\nlosses['cls'] = self.classifier.compute_loss(box_logits[sampled_inds], target_labels[sampled_inds])\nreturn (losses, sampled_pos_inds, sampled_neg_inds)", "boxes_max_fg_probs = self.classifier.box_logits_to_probs(boxes_scores)\nboxes_max_fg_probs = boxes_max_fg_probs.max(dim=1)[0]\nsampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(target_labels, boxes_max_fg_probs)\nsampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]\nsampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]\nreturn (sampled_pos_inds, sampled_neg_inds)"], "bodies_text": "<|body_start_0|>\n super().__init__(classifier=classifier, regressor=regressor, coder=coder)\n self.logger = None\n self.fg_bg_sampler = sampler\n<|end_body_0|>\n\n<|body_start_1|>\n box_logits, box_deltas = (prediction['box_logits'], prediction['box_deltas'])\n losses = {}\n sampled_pos_inds, sampled_neg_inds = self.select_indices(target_labels, box_logits)\n sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\n target_labels = torch.cat(target_labels, dim=0)\n with torch.no_grad():\n batch_matched_gt_boxes = torch.cat(matched_gt_boxes, dim=0)\n batch_anchors = torch.cat(anchors, dim=0)\n target_deltas_sampled = self.coder.encode_single(batch_matched_gt_boxes[sampled_pos_inds], batch_anchors[sampled_pos_inds])\n if sampled_pos_inds.numel() > 0:\n losses['reg'] = self.regressor.compute_loss(box_deltas[sampled_pos_inds], target_deltas_sampled) / max(1, sampled_pos_inds.numel())\n losses['cls'] = self.classifier.compute_loss(box_logits[sampled_inds], target_labels[sampled_inds])\n return (losses, sampled_pos_inds, sampled_neg_inds)\n<|end_body_1|>\n\n<|body_start_2|>\n boxes_max_fg_probs = self.classifier.box_logits_to_probs(boxes_scores)\n boxes_max_fg_probs = boxes_max_fg_probs.max(dim=1)[0]\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(target_labels, boxes_max_fg_probs)\n sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]\n sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]\n return (sampled_pos_inds, sampled_neg_inds)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "DetectionHeadHNM", "detected_licenses": ["BSD-3-Clause", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DetectionHeadHNM:\n\n def __init__(self, classifier: Classifier, regressor: Regressor, coder: BoxCoderND, sampler: AbstractSampler, log_num_anchors: Optional[str]='mllogger'):\n \"\"\"Detection head with classifier and regression module. Uses hard negative example mining to compute loss Args: classifier: classifier module regressor: regression module sampler (AbstractSampler): sampler for select positive and negative examples log_num_anchors (str): name of logger to use; if None, no logging will be performed\"\"\"\n <|body_0|>\n\n def compute_loss(self, prediction: Dict[str, Tensor], target_labels: List[Tensor], matched_gt_boxes: List[Tensor], anchors: List[Tensor]) -> Tuple[Dict[str, Tensor], torch.Tensor, torch.Tensor]:\n \"\"\"Compute regression and classification loss N anchors over all images; M anchors per image => sum(M) = N Args: prediction: detection predictions for loss computation box_logits (Tensor): classification logits for each anchor [N, num_classes] box_deltas (Tensor): offsets for each anchor (x1, y1, x2, y2, (z1, z2))[N, dim * 2] target_labels (List[Tensor]): target labels for each anchor (per image) [M] matched_gt_boxes: matched gt box for each anchor List[[N, dim * 2]], N=number of anchors per image anchors: anchors per image List[[N, dim * 2]] Returns: Tensor: dict with losses (reg for regression loss, cls for classification loss) Tensor: sampled positive indices of anchors (after concatenation)\"\"\"\n <|body_1|>\n\n def select_indices(self, target_labels: List[Tensor], boxes_scores: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Sample positive and negative anchors from target labels Args: target_labels (List[Tensor]): target labels for each anchor (per image) [M] boxes_scores (Tensor): classification logits for each anchor [N, num_classes] Returns: Tensor: sampled positive indices [R] Tensor: sampled negative indices [R]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(classifier=classifier, regressor=regressor, coder=coder)\n self.logger = None\n self.fg_bg_sampler = sampler\n<|end_body_0|>\n\n<|body_start_1|>\n box_logits, box_deltas = (prediction['box_logits'], prediction['box_deltas'])\n losses = {}\n sampled_pos_inds, sampled_neg_inds = self.select_indices(target_labels, box_logits)\n sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\n target_labels = torch.cat(target_labels, dim=0)\n with torch.no_grad():\n batch_matched_gt_boxes = torch.cat(matched_gt_boxes, dim=0)\n batch_anchors = torch.cat(anchors, dim=0)\n target_deltas_sampled = self.coder.encode_single(batch_matched_gt_boxes[sampled_pos_inds], batch_anchors[sampled_pos_inds])\n if sampled_pos_inds.numel() > 0:\n losses['reg'] = self.regressor.compute_loss(box_deltas[sampled_pos_inds], target_deltas_sampled) / max(1, sampled_pos_inds.numel())\n losses['cls'] = self.classifier.compute_loss(box_logits[sampled_inds], target_labels[sampled_inds])\n return (losses, sampled_pos_inds, sampled_neg_inds)\n<|end_body_1|>\n\n<|body_start_2|>\n boxes_max_fg_probs = self.classifier.box_logits_to_probs(boxes_scores)\n boxes_max_fg_probs = boxes_max_fg_probs.max(dim=1)[0]\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(target_labels, boxes_max_fg_probs)\n sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]\n sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]\n return (sampled_pos_inds, sampled_neg_inds)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000430", "length_bytes": 21997, "license_type": "permissive", "methods": [{"docstring": "Detection head with classifier and regression module. Uses hard negative example mining to compute loss Args: classifier: classifier module regressor: regression module sampler (AbstractSampler): sampler for select positive and negative examples log_num_anchors (str): name of logger to use; if None, no logging will be performed", "name": "__init__", "signature": "def __init__(self, classifier: Classifier, regressor: Regressor, coder: BoxCoderND, sampler: AbstractSampler, log_num_anchors: Optional[str]='mllogger')"}, {"docstring": "Compute regression and classification loss N anchors over all images; M anchors per image => sum(M) = N Args: prediction: detection predictions for loss computation box_logits (Tensor): classification logits for each anchor [N, num_classes] box_deltas (Tensor): offsets for each anchor (x1, y1, x2, y2, (z1, z2))[N, dim * 2] target_labels (List[Tensor]): target labels for each anchor (per image) [M] matched_gt_boxes: matched gt box for each anchor List[[N, dim * 2]], N=number of anchors per image anchors: anchors per image List[[N, dim * 2]] Returns: Tensor: dict with losses (reg for regression loss, cls for classification loss) Tensor: sampled positive indices of anchors (after concatenation)", "name": "compute_loss", "signature": "def compute_loss(self, prediction: Dict[str, Tensor], target_labels: List[Tensor], matched_gt_boxes: List[Tensor], anchors: List[Tensor]) -> Tuple[Dict[str, Tensor], torch.Tensor, torch.Tensor]"}, {"docstring": "Sample positive and negative anchors from target labels Args: target_labels (List[Tensor]): target labels for each anchor (per image) [M] boxes_scores (Tensor): classification logits for each anchor [N, num_classes] Returns: Tensor: sampled positive indices [R] Tensor: sampled negative indices [R]", "name": "select_indices", "signature": "def select_indices(self, target_labels: List[Tensor], boxes_scores: Tensor) -> Tuple[Tensor, Tensor]"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_032087", "prompt": "Implement the Python class `DetectionHeadHNM` described below.\n\nClass description:\nImplement the DetectionHeadHNM class.\n\nMethod signatures and docstrings:\n- def __init__(self, classifier: Classifier, regressor: Regressor, coder: BoxCoderND, sampler: AbstractSampler, log_num_anchors: Optional[str]='mllogger'): Detection head with classifier and regression module. Uses hard negative example mining to compute loss Args: classifier: classifier module regressor: regression module sampler (AbstractSampler): sampler for select positive and negative examples log_num_anchors (str): name of logger to use; if None, no logging will be performed\n- def compute_loss(self, prediction: Dict[str, Tensor], target_labels: List[Tensor], matched_gt_boxes: List[Tensor], anchors: List[Tensor]) -> Tuple[Dict[str, Tensor], torch.Tensor, torch.Tensor]: Compute regression and classification loss N anchors over all images; M anchors per image => sum(M) = N Args: prediction: detection predictions for loss computation box_logits (Tensor): classification logits for each anchor [N, num_classes] box_deltas (Tensor): offsets for each anchor (x1, y1, x2, y2, (z1, z2))[N, dim * 2] target_labels (List[Tensor]): target labels for each anchor (per image) [M] matched_gt_boxes: matched gt box for each anchor List[[N, dim * 2]], N=number of anchors per image anchors: anchors per image List[[N, dim * 2]] Returns: Tensor: dict with losses (reg for regression loss, cls for classification loss) Tensor: sampled positive indices of anchors (after concatenation)\n- def select_indices(self, target_labels: List[Tensor], boxes_scores: Tensor) -> Tuple[Tensor, Tensor]: Sample positive and negative anchors from target labels Args: target_labels (List[Tensor]): target labels for each anchor (per image) [M] boxes_scores (Tensor): classification logits for each anchor [N, num_classes] Returns: Tensor: sampled positive indices [R] Tensor: sampled negative indices [R]", "prompted_full_text": "Implement the Python class `DetectionHeadHNM` described below.\n\nClass description:\nImplement the DetectionHeadHNM class.\n\nMethod signatures and docstrings:\n- def __init__(self, classifier: Classifier, regressor: Regressor, coder: BoxCoderND, sampler: AbstractSampler, log_num_anchors: Optional[str]='mllogger'): Detection head with classifier and regression module. Uses hard negative example mining to compute loss Args: classifier: classifier module regressor: regression module sampler (AbstractSampler): sampler for select positive and negative examples log_num_anchors (str): name of logger to use; if None, no logging will be performed\n- def compute_loss(self, prediction: Dict[str, Tensor], target_labels: List[Tensor], matched_gt_boxes: List[Tensor], anchors: List[Tensor]) -> Tuple[Dict[str, Tensor], torch.Tensor, torch.Tensor]: Compute regression and classification loss N anchors over all images; M anchors per image => sum(M) = N Args: prediction: detection predictions for loss computation box_logits (Tensor): classification logits for each anchor [N, num_classes] box_deltas (Tensor): offsets for each anchor (x1, y1, x2, y2, (z1, z2))[N, dim * 2] target_labels (List[Tensor]): target labels for each anchor (per image) [M] matched_gt_boxes: matched gt box for each anchor List[[N, dim * 2]], N=number of anchors per image anchors: anchors per image List[[N, dim * 2]] Returns: Tensor: dict with losses (reg for regression loss, cls for classification loss) Tensor: sampled positive indices of anchors (after concatenation)\n- def select_indices(self, target_labels: List[Tensor], boxes_scores: Tensor) -> Tuple[Tensor, Tensor]: Sample positive and negative anchors from target labels Args: target_labels (List[Tensor]): target labels for each anchor (per image) [M] boxes_scores (Tensor): classification logits for each anchor [N, num_classes] Returns: Tensor: sampled positive indices [R] Tensor: sampled negative indices [R]\n\n<|skeleton|>\nclass DetectionHeadHNM:\n\n def __init__(self, classifier: Classifier, regressor: Regressor, coder: BoxCoderND, sampler: AbstractSampler, log_num_anchors: Optional[str]='mllogger'):\n \"\"\"Detection head with classifier and regression module. Uses hard negative example mining to compute loss Args: classifier: classifier module regressor: regression module sampler (AbstractSampler): sampler for select positive and negative examples log_num_anchors (str): name of logger to use; if None, no logging will be performed\"\"\"\n <|body_0|>\n\n def compute_loss(self, prediction: Dict[str, Tensor], target_labels: List[Tensor], matched_gt_boxes: List[Tensor], anchors: List[Tensor]) -> Tuple[Dict[str, Tensor], torch.Tensor, torch.Tensor]:\n \"\"\"Compute regression and classification loss N anchors over all images; M anchors per image => sum(M) = N Args: prediction: detection predictions for loss computation box_logits (Tensor): classification logits for each anchor [N, num_classes] box_deltas (Tensor): offsets for each anchor (x1, y1, x2, y2, (z1, z2))[N, dim * 2] target_labels (List[Tensor]): target labels for each anchor (per image) [M] matched_gt_boxes: matched gt box for each anchor List[[N, dim * 2]], N=number of anchors per image anchors: anchors per image List[[N, dim * 2]] Returns: Tensor: dict with losses (reg for regression loss, cls for classification loss) Tensor: sampled positive indices of anchors (after concatenation)\"\"\"\n <|body_1|>\n\n def select_indices(self, target_labels: List[Tensor], boxes_scores: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Sample positive and negative anchors from target labels Args: target_labels (List[Tensor]): target labels for each anchor (per image) [M] boxes_scores (Tensor): classification logits for each anchor [N, num_classes] Returns: Tensor: sampled positive indices [R] Tensor: sampled negative indices [R]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(classifier=classifier, regressor=regressor, coder=coder)\n self.logger = None\n self.fg_bg_sampler = sampler\n<|end_body_0|>\n\n<|body_start_1|>\n box_logits, box_deltas = (prediction['box_logits'], prediction['box_deltas'])\n losses = {}\n sampled_pos_inds, sampled_neg_inds = self.select_indices(target_labels, box_logits)\n sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\n target_labels = torch.cat(target_labels, dim=0)\n with torch.no_grad():\n batch_matched_gt_boxes = torch.cat(matched_gt_boxes, dim=0)\n batch_anchors = torch.cat(anchors, dim=0)\n target_deltas_sampled = self.coder.encode_single(batch_matched_gt_boxes[sampled_pos_inds], batch_anchors[sampled_pos_inds])\n if sampled_pos_inds.numel() > 0:\n losses['reg'] = self.regressor.compute_loss(box_deltas[sampled_pos_inds], target_deltas_sampled) / max(1, sampled_pos_inds.numel())\n losses['cls'] = self.classifier.compute_loss(box_logits[sampled_inds], target_labels[sampled_inds])\n return (losses, sampled_pos_inds, sampled_neg_inds)\n<|end_body_1|>\n\n<|body_start_2|>\n boxes_max_fg_probs = self.classifier.box_logits_to_probs(boxes_scores)\n boxes_max_fg_probs = boxes_max_fg_probs.max(dim=1)[0]\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(target_labels, boxes_max_fg_probs)\n sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]\n sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]\n return (sampled_pos_inds, sampled_neg_inds)\n<|end_body_2|>\n", "revision_id": "4f41faa7536dcef8fca7b647dcdca25360e5b58a", "skeleton": "<|skeleton|>\nclass DetectionHeadHNM:\n\n def __init__(self, classifier: Classifier, regressor: Regressor, coder: BoxCoderND, sampler: AbstractSampler, log_num_anchors: Optional[str]='mllogger'):\n \"\"\"Detection head with classifier and regression module. Uses hard negative example mining to compute loss Args: classifier: classifier module regressor: regression module sampler (AbstractSampler): sampler for select positive and negative examples log_num_anchors (str): name of logger to use; if None, no logging will be performed\"\"\"\n <|body_0|>\n\n def compute_loss(self, prediction: Dict[str, Tensor], target_labels: List[Tensor], matched_gt_boxes: List[Tensor], anchors: List[Tensor]) -> Tuple[Dict[str, Tensor], torch.Tensor, torch.Tensor]:\n \"\"\"Compute regression and classification loss N anchors over all images; M anchors per image => sum(M) = N Args: prediction: detection predictions for loss computation box_logits (Tensor): classification logits for each anchor [N, num_classes] box_deltas (Tensor): offsets for each anchor (x1, y1, x2, y2, (z1, z2))[N, dim * 2] target_labels (List[Tensor]): target labels for each anchor (per image) [M] matched_gt_boxes: matched gt box for each anchor List[[N, dim * 2]], N=number of anchors per image anchors: anchors per image List[[N, dim * 2]] Returns: Tensor: dict with losses (reg for regression loss, cls for classification loss) Tensor: sampled positive indices of anchors (after concatenation)\"\"\"\n <|body_1|>\n\n def select_indices(self, target_labels: List[Tensor], boxes_scores: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Sample positive and negative anchors from target labels Args: target_labels (List[Tensor]): target labels for each anchor (per image) [M] boxes_scores (Tensor): classification logits for each anchor [N, num_classes] Returns: Tensor: sampled positive indices [R] Tensor: sampled negative indices [R]\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DetectionHeadHNM:\n def __init__(self, classifier: Classifier, regressor: Regressor, coder: BoxCoderND, sampler: AbstractSampler, log_num_anchors: Optional[str]='mllogger'):\n \"\"\"Detection head with classifier and regression module. Uses hard negative example mining to compute loss Args: classifier: classifier module regressor: regression module sampler (AbstractSampler): sampler for select positive and negative examples log_num_anchors (str): name of logger to use; if None, no logging will be performed\"\"\"\n super().__init__(classifier=classifier, regressor=regressor, coder=coder)\n self.logger = None\n self.fg_bg_sampler = sampler\n\n def compute_loss(self, prediction: Dict[str, Tensor], target_labels: List[Tensor], matched_gt_boxes: List[Tensor], anchors: List[Tensor]) -> Tuple[Dict[str, Tensor], torch.Tensor, torch.Tensor]:\n \"\"\"Compute regression and classification loss N anchors over all images; M anchors per image => sum(M) = N Args: prediction: detection predictions for loss computation box_logits (Tensor): classification logits for each anchor [N, num_classes] box_deltas (Tensor): offsets for each anchor (x1, y1, x2, y2, (z1, z2))[N, dim * 2] target_labels (List[Tensor]): target labels for each anchor (per image) [M] matched_gt_boxes: matched gt box for each anchor List[[N, dim * 2]], N=number of anchors per image anchors: anchors per image List[[N, dim * 2]] Returns: Tensor: dict with losses (reg for regression loss, cls for classification loss) Tensor: sampled positive indices of anchors (after concatenation)\"\"\"\n box_logits, box_deltas = (prediction['box_logits'], prediction['box_deltas'])\n losses = {}\n sampled_pos_inds, sampled_neg_inds = self.select_indices(target_labels, box_logits)\n sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)\n target_labels = torch.cat(target_labels, dim=0)\n with torch.no_grad():\n batch_matched_gt_boxes = torch.cat(matched_gt_boxes, dim=0)\n batch_anchors = torch.cat(anchors, dim=0)\n target_deltas_sampled = self.coder.encode_single(batch_matched_gt_boxes[sampled_pos_inds], batch_anchors[sampled_pos_inds])\n if sampled_pos_inds.numel() > 0:\n losses['reg'] = self.regressor.compute_loss(box_deltas[sampled_pos_inds], target_deltas_sampled) / max(1, sampled_pos_inds.numel())\n losses['cls'] = self.classifier.compute_loss(box_logits[sampled_inds], target_labels[sampled_inds])\n return (losses, sampled_pos_inds, sampled_neg_inds)\n\n def select_indices(self, target_labels: List[Tensor], boxes_scores: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Sample positive and negative anchors from target labels Args: target_labels (List[Tensor]): target labels for each anchor (per image) [M] boxes_scores (Tensor): classification logits for each anchor [N, num_classes] Returns: Tensor: sampled positive indices [R] Tensor: sampled negative indices [R]\"\"\"\n boxes_max_fg_probs = self.classifier.box_logits_to_probs(boxes_scores)\n boxes_max_fg_probs = boxes_max_fg_probs.max(dim=1)[0]\n sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(target_labels, boxes_max_fg_probs)\n sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0]\n sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0]\n return (sampled_pos_inds, sampled_neg_inds)\n", "source": "the_stack_v2_python_sparse", "source_path": "nndet/arch/heads/comb.py", "source_repo": "dboun/nnDetection", "split": "test", "star_events_count": 1} {"blob_id": "c943bf5cfcd736d9c0b01436a03c2e9a903b2663", "bodies": ["super(BlockPusher, self).__init__()\nself.provider = provider\nself.blocks = blocks\nself.queue = queue", "loop_temp = 'Going to put block with key {:s} in provider {:s}'\nfor metablock, data in self.blocks.iteritems():\n logger.debug(loop_temp.format(metablock.key, str(type(self.provider))))\n try:\n self.provider.put(data, metablock.key)\n index = extract_index_from_key(metablock.key)\n self.queue[index] = metablock\n except redis.ConnectionError:\n index = extract_index_from_key(metablock.key)\n self.queue[index] = CouldNotPushException(self.provider, metablock.key)"], "bodies_text": "<|body_start_0|>\n super(BlockPusher, self).__init__()\n self.provider = provider\n self.blocks = blocks\n self.queue = queue\n<|end_body_0|>\n\n<|body_start_1|>\n loop_temp = 'Going to put block with key {:s} in provider {:s}'\n for metablock, data in self.blocks.iteritems():\n logger.debug(loop_temp.format(metablock.key, str(type(self.provider))))\n try:\n self.provider.put(data, metablock.key)\n index = extract_index_from_key(metablock.key)\n self.queue[index] = metablock\n except redis.ConnectionError:\n index = extract_index_from_key(metablock.key)\n self.queue[index] = CouldNotPushException(self.provider, metablock.key)\n<|end_body_1|>\n", "class_docstring": "Threaded code to push blocks using a given provider", "class_name": "BlockPusher", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BlockPusher:\n \"\"\"Threaded code to push blocks using a given provider\"\"\"\n\n def __init__(self, provider, blocks, queue):\n \"\"\"Initialize the BlockPusher by providing blocks to store in a provider the queue to push the metablocks produced by the insertion Args: provider(Provider) -- The provider that will store the data blocks(dict(MetaBlock, bytes) -- The list of blocks that constitutes the file queue(dict) -- Queue where the metablocks should be stored once the block are stored\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Loop through self.blocks, selecting those stored at indices listed in self.indices and feeding them to self.provider for storage\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BlockPusher, self).__init__()\n self.provider = provider\n self.blocks = blocks\n self.queue = queue\n<|end_body_0|>\n\n<|body_start_1|>\n loop_temp = 'Going to put block with key {:s} in provider {:s}'\n for metablock, data in self.blocks.iteritems():\n logger.debug(loop_temp.format(metablock.key, str(type(self.provider))))\n try:\n self.provider.put(data, metablock.key)\n index = extract_index_from_key(metablock.key)\n self.queue[index] = metablock\n except redis.ConnectionError:\n index = extract_index_from_key(metablock.key)\n self.queue[index] = CouldNotPushException(self.provider, metablock.key)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000431", "length_bytes": 20206, "license_type": "permissive", "methods": [{"docstring": "Initialize the BlockPusher by providing blocks to store in a provider the queue to push the metablocks produced by the insertion Args: provider(Provider) -- The provider that will store the data blocks(dict(MetaBlock, bytes) -- The list of blocks that constitutes the file queue(dict) -- Queue where the metablocks should be stored once the block are stored", "name": "__init__", "signature": "def __init__(self, provider, blocks, queue)"}, {"docstring": "Loop through self.blocks, selecting those stored at indices listed in self.indices and feeding them to self.provider for storage", "name": "run", "signature": "def run(self)"}], "n_methods": 2, "prompt": "Implement the Python class `BlockPusher` described below.\n\nClass description:\nThreaded code to push blocks using a given provider\n\nMethod signatures and docstrings:\n- def __init__(self, provider, blocks, queue): Initialize the BlockPusher by providing blocks to store in a provider the queue to push the metablocks produced by the insertion Args: provider(Provider) -- The provider that will store the data blocks(dict(MetaBlock, bytes) -- The list of blocks that constitutes the file queue(dict) -- Queue where the metablocks should be stored once the block are stored\n- def run(self): Loop through self.blocks, selecting those stored at indices listed in self.indices and feeding them to self.provider for storage", "prompted_full_text": "Implement the Python class `BlockPusher` described below.\n\nClass description:\nThreaded code to push blocks using a given provider\n\nMethod signatures and docstrings:\n- def __init__(self, provider, blocks, queue): Initialize the BlockPusher by providing blocks to store in a provider the queue to push the metablocks produced by the insertion Args: provider(Provider) -- The provider that will store the data blocks(dict(MetaBlock, bytes) -- The list of blocks that constitutes the file queue(dict) -- Queue where the metablocks should be stored once the block are stored\n- def run(self): Loop through self.blocks, selecting those stored at indices listed in self.indices and feeding them to self.provider for storage\n\n<|skeleton|>\nclass BlockPusher:\n \"\"\"Threaded code to push blocks using a given provider\"\"\"\n\n def __init__(self, provider, blocks, queue):\n \"\"\"Initialize the BlockPusher by providing blocks to store in a provider the queue to push the metablocks produced by the insertion Args: provider(Provider) -- The provider that will store the data blocks(dict(MetaBlock, bytes) -- The list of blocks that constitutes the file queue(dict) -- Queue where the metablocks should be stored once the block are stored\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Loop through self.blocks, selecting those stored at indices listed in self.indices and feeding them to self.provider for storage\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(BlockPusher, self).__init__()\n self.provider = provider\n self.blocks = blocks\n self.queue = queue\n<|end_body_0|>\n\n<|body_start_1|>\n loop_temp = 'Going to put block with key {:s} in provider {:s}'\n for metablock, data in self.blocks.iteritems():\n logger.debug(loop_temp.format(metablock.key, str(type(self.provider))))\n try:\n self.provider.put(data, metablock.key)\n index = extract_index_from_key(metablock.key)\n self.queue[index] = metablock\n except redis.ConnectionError:\n index = extract_index_from_key(metablock.key)\n self.queue[index] = CouldNotPushException(self.provider, metablock.key)\n<|end_body_1|>\n", "revision_id": "e9138580594569cbbc7d325e8cd4b1740667edac", "skeleton": "<|skeleton|>\nclass BlockPusher:\n \"\"\"Threaded code to push blocks using a given provider\"\"\"\n\n def __init__(self, provider, blocks, queue):\n \"\"\"Initialize the BlockPusher by providing blocks to store in a provider the queue to push the metablocks produced by the insertion Args: provider(Provider) -- The provider that will store the data blocks(dict(MetaBlock, bytes) -- The list of blocks that constitutes the file queue(dict) -- Queue where the metablocks should be stored once the block are stored\"\"\"\n <|body_0|>\n\n def run(self):\n \"\"\"Loop through self.blocks, selecting those stored at indices listed in self.indices and feeding them to self.provider for storage\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BlockPusher:\n \"\"\"Threaded code to push blocks using a given provider\"\"\"\n\n def __init__(self, provider, blocks, queue):\n \"\"\"Initialize the BlockPusher by providing blocks to store in a provider the queue to push the metablocks produced by the insertion Args: provider(Provider) -- The provider that will store the data blocks(dict(MetaBlock, bytes) -- The list of blocks that constitutes the file queue(dict) -- Queue where the metablocks should be stored once the block are stored\"\"\"\n super(BlockPusher, self).__init__()\n self.provider = provider\n self.blocks = blocks\n self.queue = queue\n\n def run(self):\n \"\"\"Loop through self.blocks, selecting those stored at indices listed in self.indices and feeding them to self.provider for storage\"\"\"\n loop_temp = 'Going to put block with key {:s} in provider {:s}'\n for metablock, data in self.blocks.iteritems():\n logger.debug(loop_temp.format(metablock.key, str(type(self.provider))))\n try:\n self.provider.put(data, metablock.key)\n index = extract_index_from_key(metablock.key)\n self.queue[index] = metablock\n except redis.ConnectionError:\n index = extract_index_from_key(metablock.key)\n self.queue[index] = CouldNotPushException(self.provider, metablock.key)\n", "source": "the_stack_v2_python_sparse", "source_path": "pyproxy/pyproxy/providers/dispatcher.py", "source_repo": "safecloud-project/recast", "split": "test", "star_events_count": 0} {"blob_id": "2dc0a94372c478853f496fb1fa1ff0f56e8df822", "bodies": ["edges = self.get_edges(True)\n_in_degree = [0] * len(edges)\nfor edge_list in edges:\n for edge in edge_list:\n _in_degree[edge[1]] += 1\nreturn _in_degree", "_in_degree = self.get_in_degree()\nedges = self.get_edges(True)\nsequence = []\n_next = [inx for inx, item in enumerate(_in_degree) if item == 0]\nwhile _next:\n vertex = _next.pop()\n sequence.append(vertex)\n for edge in edges[vertex]:\n _in_degree[edge[1]] -= 1\n if _in_degree[edge[1]] <= 0:\n _next.append(edge[1])\nreturn sequence", "indexes = self.sort()\nsequence = self.get_nodes_sequence(indexes)\nprint(sequence)"], "bodies_text": "<|body_start_0|>\n edges = self.get_edges(True)\n _in_degree = [0] * len(edges)\n for edge_list in edges:\n for edge in edge_list:\n _in_degree[edge[1]] += 1\n return _in_degree\n<|end_body_0|>\n\n<|body_start_1|>\n _in_degree = self.get_in_degree()\n edges = self.get_edges(True)\n sequence = []\n _next = [inx for inx, item in enumerate(_in_degree) if item == 0]\n while _next:\n vertex = _next.pop()\n sequence.append(vertex)\n for edge in edges[vertex]:\n _in_degree[edge[1]] -= 1\n if _in_degree[edge[1]] <= 0:\n _next.append(edge[1])\n return sequence\n<|end_body_1|>\n\n<|body_start_2|>\n indexes = self.sort()\n sequence = self.get_nodes_sequence(indexes)\n print(sequence)\n<|end_body_2|>\n", "class_docstring": "Implementation of topological sort relations between vertices can be presented as matrix N x N Since an adjacency matrix has n2 entries, it must be true that m <= n2 (from DAG definition) where n count of vertexes; m count of edges", "class_name": "TopologicalSort", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TopologicalSort:\n \"\"\"Implementation of topological sort relations between vertices can be presented as matrix N x N Since an adjacency matrix has n2 entries, it must be true that m <= n2 (from DAG definition) where n count of vertexes; m count of edges\"\"\"\n\n def get_in_degree(self):\n \"\"\"Create list of graph's nodes in-degree Count of vertices which depends on current one. Complexity: O(n+m) :Returns: *list* - list with in-degree of each node\"\"\"\n <|body_0|>\n\n def sort(self):\n \"\"\"Make topological sort\"\"\"\n <|body_1|>\n\n def main(self):\n \"\"\"main entry point\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n edges = self.get_edges(True)\n _in_degree = [0] * len(edges)\n for edge_list in edges:\n for edge in edge_list:\n _in_degree[edge[1]] += 1\n return _in_degree\n<|end_body_0|>\n\n<|body_start_1|>\n _in_degree = self.get_in_degree()\n edges = self.get_edges(True)\n sequence = []\n _next = [inx for inx, item in enumerate(_in_degree) if item == 0]\n while _next:\n vertex = _next.pop()\n sequence.append(vertex)\n for edge in edges[vertex]:\n _in_degree[edge[1]] -= 1\n if _in_degree[edge[1]] <= 0:\n _next.append(edge[1])\n return sequence\n<|end_body_1|>\n\n<|body_start_2|>\n indexes = self.sort()\n sequence = self.get_nodes_sequence(indexes)\n print(sequence)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000432", "length_bytes": 2526, "license_type": "no_license", "methods": [{"docstring": "Create list of graph's nodes in-degree Count of vertices which depends on current one. Complexity: O(n+m) :Returns: *list* - list with in-degree of each node", "name": "get_in_degree", "signature": "def get_in_degree(self)"}, {"docstring": "Make topological sort", "name": "sort", "signature": "def sort(self)"}, {"docstring": "main entry point", "name": "main", "signature": "def main(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_022431", "prompt": "Implement the Python class `TopologicalSort` described below.\n\nClass description:\nImplementation of topological sort relations between vertices can be presented as matrix N x N Since an adjacency matrix has n2 entries, it must be true that m <= n2 (from DAG definition) where n count of vertexes; m count of edges\n\nMethod signatures and docstrings:\n- def get_in_degree(self): Create list of graph's nodes in-degree Count of vertices which depends on current one. Complexity: O(n+m) :Returns: *list* - list with in-degree of each node\n- def sort(self): Make topological sort\n- def main(self): main entry point", "prompted_full_text": "Implement the Python class `TopologicalSort` described below.\n\nClass description:\nImplementation of topological sort relations between vertices can be presented as matrix N x N Since an adjacency matrix has n2 entries, it must be true that m <= n2 (from DAG definition) where n count of vertexes; m count of edges\n\nMethod signatures and docstrings:\n- def get_in_degree(self): Create list of graph's nodes in-degree Count of vertices which depends on current one. Complexity: O(n+m) :Returns: *list* - list with in-degree of each node\n- def sort(self): Make topological sort\n- def main(self): main entry point\n\n<|skeleton|>\nclass TopologicalSort:\n \"\"\"Implementation of topological sort relations between vertices can be presented as matrix N x N Since an adjacency matrix has n2 entries, it must be true that m <= n2 (from DAG definition) where n count of vertexes; m count of edges\"\"\"\n\n def get_in_degree(self):\n \"\"\"Create list of graph's nodes in-degree Count of vertices which depends on current one. Complexity: O(n+m) :Returns: *list* - list with in-degree of each node\"\"\"\n <|body_0|>\n\n def sort(self):\n \"\"\"Make topological sort\"\"\"\n <|body_1|>\n\n def main(self):\n \"\"\"main entry point\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n edges = self.get_edges(True)\n _in_degree = [0] * len(edges)\n for edge_list in edges:\n for edge in edge_list:\n _in_degree[edge[1]] += 1\n return _in_degree\n<|end_body_0|>\n\n<|body_start_1|>\n _in_degree = self.get_in_degree()\n edges = self.get_edges(True)\n sequence = []\n _next = [inx for inx, item in enumerate(_in_degree) if item == 0]\n while _next:\n vertex = _next.pop()\n sequence.append(vertex)\n for edge in edges[vertex]:\n _in_degree[edge[1]] -= 1\n if _in_degree[edge[1]] <= 0:\n _next.append(edge[1])\n return sequence\n<|end_body_1|>\n\n<|body_start_2|>\n indexes = self.sort()\n sequence = self.get_nodes_sequence(indexes)\n print(sequence)\n<|end_body_2|>\n", "revision_id": "8b3b1f146b7eac5dc15b16aaf837441069cf5989", "skeleton": "<|skeleton|>\nclass TopologicalSort:\n \"\"\"Implementation of topological sort relations between vertices can be presented as matrix N x N Since an adjacency matrix has n2 entries, it must be true that m <= n2 (from DAG definition) where n count of vertexes; m count of edges\"\"\"\n\n def get_in_degree(self):\n \"\"\"Create list of graph's nodes in-degree Count of vertices which depends on current one. Complexity: O(n+m) :Returns: *list* - list with in-degree of each node\"\"\"\n <|body_0|>\n\n def sort(self):\n \"\"\"Make topological sort\"\"\"\n <|body_1|>\n\n def main(self):\n \"\"\"main entry point\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TopologicalSort:\n \"\"\"Implementation of topological sort relations between vertices can be presented as matrix N x N Since an adjacency matrix has n2 entries, it must be true that m <= n2 (from DAG definition) where n count of vertexes; m count of edges\"\"\"\n\n def get_in_degree(self):\n \"\"\"Create list of graph's nodes in-degree Count of vertices which depends on current one. Complexity: O(n+m) :Returns: *list* - list with in-degree of each node\"\"\"\n edges = self.get_edges(True)\n _in_degree = [0] * len(edges)\n for edge_list in edges:\n for edge in edge_list:\n _in_degree[edge[1]] += 1\n return _in_degree\n\n def sort(self):\n \"\"\"Make topological sort\"\"\"\n _in_degree = self.get_in_degree()\n edges = self.get_edges(True)\n sequence = []\n _next = [inx for inx, item in enumerate(_in_degree) if item == 0]\n while _next:\n vertex = _next.pop()\n sequence.append(vertex)\n for edge in edges[vertex]:\n _in_degree[edge[1]] -= 1\n if _in_degree[edge[1]] <= 0:\n _next.append(edge[1])\n return sequence\n\n def main(self):\n \"\"\"main entry point\"\"\"\n indexes = self.sort()\n sequence = self.get_nodes_sequence(indexes)\n print(sequence)\n", "source": "the_stack_v2_python_sparse", "source_path": "graph/topological_sort.py", "source_repo": "shuvava/python_algorithms", "split": "test", "star_events_count": 2} {"blob_id": "43cba6372cd98ed59049500c685395f4ade5b964", "bodies": ["def dfsSum(root, base):\n if not root:\n return 0\n rt = base * 10 + root.val\n if not root.left and (not root.right):\n return rt\n else:\n return dfsSum(root.left, rt) + dfsSum(root.right, rt)\nreturn dfsSum(root, 0)", "stk = [(root, 0)]\ns = 0\nwhile stk:\n p = stk.pop()\n if p[0]:\n base = p[1] * 10 + p[0].val\n if not p[0].left and (not p[0].right):\n s += base\n else:\n stk.append((p[0].left, base))\n stk.append((p[0].right, base))\nreturn s", "queue = [(root, 0)]\ns = 0\nfor rt, pre in queue:\n if rt:\n base = rt.val + pre * 10\n if not rt.left and (not rt.right):\n s += base\n else:\n queue.append((rt.left, base))\n queue.append((rt.right, base))\nreturn s"], "bodies_text": "<|body_start_0|>\n def dfsSum(root, base):\n if not root:\n return 0\n rt = base * 10 + root.val\n if not root.left and (not root.right):\n return rt\n else:\n return dfsSum(root.left, rt) + dfsSum(root.right, rt)\n return dfsSum(root, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n stk = [(root, 0)]\n s = 0\n while stk:\n p = stk.pop()\n if p[0]:\n base = p[1] * 10 + p[0].val\n if not p[0].left and (not p[0].right):\n s += base\n else:\n stk.append((p[0].left, base))\n stk.append((p[0].right, base))\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n queue = [(root, 0)]\n s = 0\n for rt, pre in queue:\n if rt:\n base = rt.val + pre * 10\n if not rt.left and (not rt.right):\n s += base\n else:\n queue.append((rt.left, base))\n queue.append((rt.right, base))\n return s\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def sumNumbers_dfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def sumNumbers_recuisive(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n def sumNumbers_bfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfsSum(root, base):\n if not root:\n return 0\n rt = base * 10 + root.val\n if not root.left and (not root.right):\n return rt\n else:\n return dfsSum(root.left, rt) + dfsSum(root.right, rt)\n return dfsSum(root, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n stk = [(root, 0)]\n s = 0\n while stk:\n p = stk.pop()\n if p[0]:\n base = p[1] * 10 + p[0].val\n if not p[0].left and (not p[0].right):\n s += base\n else:\n stk.append((p[0].left, base))\n stk.append((p[0].right, base))\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n queue = [(root, 0)]\n s = 0\n for rt, pre in queue:\n if rt:\n base = rt.val + pre * 10\n if not rt.left and (not rt.right):\n s += base\n else:\n queue.append((rt.left, base))\n queue.append((rt.right, base))\n return s\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000433", "length_bytes": 1781, "license_type": "no_license", "methods": [{"docstring": ":type root: TreeNode :rtype: int", "name": "sumNumbers_dfs", "signature": "def sumNumbers_dfs(self, root)"}, {"docstring": ":type root: TreeNode :rtype: int", "name": "sumNumbers_recuisive", "signature": "def sumNumbers_recuisive(self, root)"}, {"docstring": ":type root: TreeNode :rtype: int", "name": "sumNumbers_bfs", "signature": "def sumNumbers_bfs(self, root)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_009358", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def sumNumbers_dfs(self, root): :type root: TreeNode :rtype: int\n- def sumNumbers_recuisive(self, root): :type root: TreeNode :rtype: int\n- def sumNumbers_bfs(self, root): :type root: TreeNode :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def sumNumbers_dfs(self, root): :type root: TreeNode :rtype: int\n- def sumNumbers_recuisive(self, root): :type root: TreeNode :rtype: int\n- def sumNumbers_bfs(self, root): :type root: TreeNode :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def sumNumbers_dfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def sumNumbers_recuisive(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n def sumNumbers_bfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfsSum(root, base):\n if not root:\n return 0\n rt = base * 10 + root.val\n if not root.left and (not root.right):\n return rt\n else:\n return dfsSum(root.left, rt) + dfsSum(root.right, rt)\n return dfsSum(root, 0)\n<|end_body_0|>\n\n<|body_start_1|>\n stk = [(root, 0)]\n s = 0\n while stk:\n p = stk.pop()\n if p[0]:\n base = p[1] * 10 + p[0].val\n if not p[0].left and (not p[0].right):\n s += base\n else:\n stk.append((p[0].left, base))\n stk.append((p[0].right, base))\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n queue = [(root, 0)]\n s = 0\n for rt, pre in queue:\n if rt:\n base = rt.val + pre * 10\n if not rt.left and (not rt.right):\n s += base\n else:\n queue.append((rt.left, base))\n queue.append((rt.right, base))\n return s\n<|end_body_2|>\n", "revision_id": "0e99f9a5226507706b3ee66fd04bae813755ef40", "skeleton": "<|skeleton|>\nclass Solution:\n\n def sumNumbers_dfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_0|>\n\n def sumNumbers_recuisive(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_1|>\n\n def sumNumbers_bfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def sumNumbers_dfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n def dfsSum(root, base):\n if not root:\n return 0\n rt = base * 10 + root.val\n if not root.left and (not root.right):\n return rt\n else:\n return dfsSum(root.left, rt) + dfsSum(root.right, rt)\n return dfsSum(root, 0)\n\n def sumNumbers_recuisive(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n stk = [(root, 0)]\n s = 0\n while stk:\n p = stk.pop()\n if p[0]:\n base = p[1] * 10 + p[0].val\n if not p[0].left and (not p[0].right):\n s += base\n else:\n stk.append((p[0].left, base))\n stk.append((p[0].right, base))\n return s\n\n def sumNumbers_bfs(self, root):\n \"\"\":type root: TreeNode :rtype: int\"\"\"\n queue = [(root, 0)]\n s = 0\n for rt, pre in queue:\n if rt:\n base = rt.val + pre * 10\n if not rt.left and (not rt.right):\n s += base\n else:\n queue.append((rt.left, base))\n queue.append((rt.right, base))\n return s\n", "source": "the_stack_v2_python_sparse", "source_path": "medium/tree/test_129_Sum_Root_to_Leaf_Numbers.py", "source_repo": "wuxu1019/leetcode_sophia", "split": "test", "star_events_count": 1} {"blob_id": "3f1ba51335236df24394d6d94972c6d8e6dc7d31", "bodies": ["tika_import.check()\nsuper().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages, id_hash_keys=id_hash_keys)\nping = requests.get(tika_url, timeout=timeout)\nif ping.status_code != 200:\n raise Exception(f\"Apache Tika server is not reachable at the URL '{tika_url}'. To run it locallywith Docker, execute: 'docker run -p 9998:9998 apache/tika:1.28.4'\")\nself.tika_url = tika_url\nsuper().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages)", "if remove_numeric_tables is None:\n remove_numeric_tables = self.remove_numeric_tables\nif valid_languages is None:\n valid_languages = self.valid_languages\nif id_hash_keys is None:\n id_hash_keys = self.id_hash_keys\nparsed = tika_parser.from_file(file_path.as_posix(), self.tika_url, xmlContent=True)\nparser = TikaXHTMLParser()\nparser.feed(parsed['content'])\ncleaned_pages = []\nfor page in parser.pages:\n lines = page.splitlines()\n cleaned_lines = []\n for line in lines:\n words = line.split()\n digits = [word for word in words if any((i.isdigit() for i in word))]\n if remove_numeric_tables:\n if words and len(digits) / len(words) > 0.4 and (not line.strip().endswith('.')):\n logger.debug(\"Removing line '%s' from %s\", line, file_path)\n continue\n cleaned_lines.append(line)\n page = '\\n'.join(cleaned_lines)\n cleaned_pages.append(page)\nif valid_languages:\n document_text = ''.join(cleaned_pages)\n if not self.validate_language(document_text, valid_languages):\n logger.warning('The language for %s is not one of %s. The file may not have been decoded in the correct text format.', file_path, valid_languages)\ntext = '\\x0c'.join(cleaned_pages)\ndocument = Document(content=text, meta={**parsed['metadata'], **(meta or {})}, id_hash_keys=id_hash_keys)\nreturn [document]"], "bodies_text": "<|body_start_0|>\n tika_import.check()\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages, id_hash_keys=id_hash_keys)\n ping = requests.get(tika_url, timeout=timeout)\n if ping.status_code != 200:\n raise Exception(f\"Apache Tika server is not reachable at the URL '{tika_url}'. To run it locallywith Docker, execute: 'docker run -p 9998:9998 apache/tika:1.28.4'\")\n self.tika_url = tika_url\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages)\n<|end_body_0|>\n\n<|body_start_1|>\n if remove_numeric_tables is None:\n remove_numeric_tables = self.remove_numeric_tables\n if valid_languages is None:\n valid_languages = self.valid_languages\n if id_hash_keys is None:\n id_hash_keys = self.id_hash_keys\n parsed = tika_parser.from_file(file_path.as_posix(), self.tika_url, xmlContent=True)\n parser = TikaXHTMLParser()\n parser.feed(parsed['content'])\n cleaned_pages = []\n for page in parser.pages:\n lines = page.splitlines()\n cleaned_lines = []\n for line in lines:\n words = line.split()\n digits = [word for word in words if any((i.isdigit() for i in word))]\n if remove_numeric_tables:\n if words and len(digits) / len(words) > 0.4 and (not line.strip().endswith('.')):\n logger.debug(\"Removing line '%s' from %s\", line, file_path)\n continue\n cleaned_lines.append(line)\n page = '\\n'.join(cleaned_lines)\n cleaned_pages.append(page)\n if valid_languages:\n document_text = ''.join(cleaned_pages)\n if not self.validate_language(document_text, valid_languages):\n logger.warning('The language for %s is not one of %s. The file may not have been decoded in the correct text format.', file_path, valid_languages)\n text = '\\x0c'.join(cleaned_pages)\n document = Document(content=text, meta={**parsed['metadata'], **(meta or {})}, id_hash_keys=id_hash_keys)\n return [document]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "TikaConverter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TikaConverter:\n\n def __init__(self, tika_url: str='http://localhost:9998/tika', remove_numeric_tables: bool=False, valid_languages: Optional[List[str]]=None, id_hash_keys: Optional[List[str]]=None, timeout: Union[float, Tuple[float, float]]=10.0):\n \"\"\":param tika_url: URL of the Tika server :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it\"\"\"\n <|body_0|>\n\n def convert(self, file_path: Path, meta: Optional[Dict[str, str]]=None, remove_numeric_tables: Optional[bool]=None, valid_languages: Optional[List[str]]=None, encoding: Optional[str]=None, id_hash_keys: Optional[List[str]]=None) -> List[Document]:\n \"\"\":param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tika_import.check()\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages, id_hash_keys=id_hash_keys)\n ping = requests.get(tika_url, timeout=timeout)\n if ping.status_code != 200:\n raise Exception(f\"Apache Tika server is not reachable at the URL '{tika_url}'. To run it locallywith Docker, execute: 'docker run -p 9998:9998 apache/tika:1.28.4'\")\n self.tika_url = tika_url\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages)\n<|end_body_0|>\n\n<|body_start_1|>\n if remove_numeric_tables is None:\n remove_numeric_tables = self.remove_numeric_tables\n if valid_languages is None:\n valid_languages = self.valid_languages\n if id_hash_keys is None:\n id_hash_keys = self.id_hash_keys\n parsed = tika_parser.from_file(file_path.as_posix(), self.tika_url, xmlContent=True)\n parser = TikaXHTMLParser()\n parser.feed(parsed['content'])\n cleaned_pages = []\n for page in parser.pages:\n lines = page.splitlines()\n cleaned_lines = []\n for line in lines:\n words = line.split()\n digits = [word for word in words if any((i.isdigit() for i in word))]\n if remove_numeric_tables:\n if words and len(digits) / len(words) > 0.4 and (not line.strip().endswith('.')):\n logger.debug(\"Removing line '%s' from %s\", line, file_path)\n continue\n cleaned_lines.append(line)\n page = '\\n'.join(cleaned_lines)\n cleaned_pages.append(page)\n if valid_languages:\n document_text = ''.join(cleaned_pages)\n if not self.validate_language(document_text, valid_languages):\n logger.warning('The language for %s is not one of %s. The file may not have been decoded in the correct text format.', file_path, valid_languages)\n text = '\\x0c'.join(cleaned_pages)\n document = Document(content=text, meta={**parsed['metadata'], **(meta or {})}, id_hash_keys=id_hash_keys)\n return [document]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000434", "length_bytes": 9174, "license_type": "permissive", "methods": [{"docstring": ":param tika_url: URL of the Tika server :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it", "name": "__init__", "signature": "def __init__(self, tika_url: str='http://localhost:9998/tika', remove_numeric_tables: bool=False, valid_languages: Optional[List[str]]=None, id_hash_keys: Optional[List[str]]=None, timeout: Union[float, Tuple[float, float]]=10.0)"}, {"docstring": ":param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to", "name": "convert", "signature": "def convert(self, file_path: Path, meta: Optional[Dict[str, str]]=None, remove_numeric_tables: Optional[bool]=None, valid_languages: Optional[List[str]]=None, encoding: Optional[str]=None, id_hash_keys: Optional[List[str]]=None) -> List[Document]"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034993", "prompt": "Implement the Python class `TikaConverter` described below.\n\nClass description:\nImplement the TikaConverter class.\n\nMethod signatures and docstrings:\n- def __init__(self, tika_url: str='http://localhost:9998/tika', remove_numeric_tables: bool=False, valid_languages: Optional[List[str]]=None, id_hash_keys: Optional[List[str]]=None, timeout: Union[float, Tuple[float, float]]=10.0): :param tika_url: URL of the Tika server :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it\n- def convert(self, file_path: Path, meta: Optional[Dict[str, str]]=None, remove_numeric_tables: Optional[bool]=None, valid_languages: Optional[List[str]]=None, encoding: Optional[str]=None, id_hash_keys: Optional[List[str]]=None) -> List[Document]: :param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to", "prompted_full_text": "Implement the Python class `TikaConverter` described below.\n\nClass description:\nImplement the TikaConverter class.\n\nMethod signatures and docstrings:\n- def __init__(self, tika_url: str='http://localhost:9998/tika', remove_numeric_tables: bool=False, valid_languages: Optional[List[str]]=None, id_hash_keys: Optional[List[str]]=None, timeout: Union[float, Tuple[float, float]]=10.0): :param tika_url: URL of the Tika server :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it\n- def convert(self, file_path: Path, meta: Optional[Dict[str, str]]=None, remove_numeric_tables: Optional[bool]=None, valid_languages: Optional[List[str]]=None, encoding: Optional[str]=None, id_hash_keys: Optional[List[str]]=None) -> List[Document]: :param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to\n\n<|skeleton|>\nclass TikaConverter:\n\n def __init__(self, tika_url: str='http://localhost:9998/tika', remove_numeric_tables: bool=False, valid_languages: Optional[List[str]]=None, id_hash_keys: Optional[List[str]]=None, timeout: Union[float, Tuple[float, float]]=10.0):\n \"\"\":param tika_url: URL of the Tika server :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it\"\"\"\n <|body_0|>\n\n def convert(self, file_path: Path, meta: Optional[Dict[str, str]]=None, remove_numeric_tables: Optional[bool]=None, valid_languages: Optional[List[str]]=None, encoding: Optional[str]=None, id_hash_keys: Optional[List[str]]=None) -> List[Document]:\n \"\"\":param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n tika_import.check()\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages, id_hash_keys=id_hash_keys)\n ping = requests.get(tika_url, timeout=timeout)\n if ping.status_code != 200:\n raise Exception(f\"Apache Tika server is not reachable at the URL '{tika_url}'. To run it locallywith Docker, execute: 'docker run -p 9998:9998 apache/tika:1.28.4'\")\n self.tika_url = tika_url\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages)\n<|end_body_0|>\n\n<|body_start_1|>\n if remove_numeric_tables is None:\n remove_numeric_tables = self.remove_numeric_tables\n if valid_languages is None:\n valid_languages = self.valid_languages\n if id_hash_keys is None:\n id_hash_keys = self.id_hash_keys\n parsed = tika_parser.from_file(file_path.as_posix(), self.tika_url, xmlContent=True)\n parser = TikaXHTMLParser()\n parser.feed(parsed['content'])\n cleaned_pages = []\n for page in parser.pages:\n lines = page.splitlines()\n cleaned_lines = []\n for line in lines:\n words = line.split()\n digits = [word for word in words if any((i.isdigit() for i in word))]\n if remove_numeric_tables:\n if words and len(digits) / len(words) > 0.4 and (not line.strip().endswith('.')):\n logger.debug(\"Removing line '%s' from %s\", line, file_path)\n continue\n cleaned_lines.append(line)\n page = '\\n'.join(cleaned_lines)\n cleaned_pages.append(page)\n if valid_languages:\n document_text = ''.join(cleaned_pages)\n if not self.validate_language(document_text, valid_languages):\n logger.warning('The language for %s is not one of %s. The file may not have been decoded in the correct text format.', file_path, valid_languages)\n text = '\\x0c'.join(cleaned_pages)\n document = Document(content=text, meta={**parsed['metadata'], **(meta or {})}, id_hash_keys=id_hash_keys)\n return [document]\n<|end_body_1|>\n", "revision_id": "5f1256ac7e5734c2ea481e72cb7e02c34baf8c43", "skeleton": "<|skeleton|>\nclass TikaConverter:\n\n def __init__(self, tika_url: str='http://localhost:9998/tika', remove_numeric_tables: bool=False, valid_languages: Optional[List[str]]=None, id_hash_keys: Optional[List[str]]=None, timeout: Union[float, Tuple[float, float]]=10.0):\n \"\"\":param tika_url: URL of the Tika server :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it\"\"\"\n <|body_0|>\n\n def convert(self, file_path: Path, meta: Optional[Dict[str, str]]=None, remove_numeric_tables: Optional[bool]=None, valid_languages: Optional[List[str]]=None, encoding: Optional[str]=None, id_hash_keys: Optional[List[str]]=None) -> List[Document]:\n \"\"\":param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TikaConverter:\n def __init__(self, tika_url: str='http://localhost:9998/tika', remove_numeric_tables: bool=False, valid_languages: Optional[List[str]]=None, id_hash_keys: Optional[List[str]]=None, timeout: Union[float, Tuple[float, float]]=10.0):\n \"\"\":param tika_url: URL of the Tika server :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to add test for encoding errors. If the extracted text is not one of the valid languages, then it\"\"\"\n tika_import.check()\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages, id_hash_keys=id_hash_keys)\n ping = requests.get(tika_url, timeout=timeout)\n if ping.status_code != 200:\n raise Exception(f\"Apache Tika server is not reachable at the URL '{tika_url}'. To run it locallywith Docker, execute: 'docker run -p 9998:9998 apache/tika:1.28.4'\")\n self.tika_url = tika_url\n super().__init__(remove_numeric_tables=remove_numeric_tables, valid_languages=valid_languages)\n\n def convert(self, file_path: Path, meta: Optional[Dict[str, str]]=None, remove_numeric_tables: Optional[bool]=None, valid_languages: Optional[List[str]]=None, encoding: Optional[str]=None, id_hash_keys: Optional[List[str]]=None) -> List[Document]:\n \"\"\":param file_path: path of the file to convert :param meta: dictionary of meta data key-value pairs to append in the returned document. :param remove_numeric_tables: This option uses heuristics to remove numeric rows from the tables. The tabular structures in documents might be noise for the reader model if it does not have table parsing capability for finding answers. However, tables may also have long strings that could possible candidate for searching answers. The rows containing strings are thus retained in this option. :param valid_languages: validate languages from a list of languages specified in the ISO 639-1 (https://en.wikipedia.org/wiki/ISO_639-1) format. This option can be used to\"\"\"\n if remove_numeric_tables is None:\n remove_numeric_tables = self.remove_numeric_tables\n if valid_languages is None:\n valid_languages = self.valid_languages\n if id_hash_keys is None:\n id_hash_keys = self.id_hash_keys\n parsed = tika_parser.from_file(file_path.as_posix(), self.tika_url, xmlContent=True)\n parser = TikaXHTMLParser()\n parser.feed(parsed['content'])\n cleaned_pages = []\n for page in parser.pages:\n lines = page.splitlines()\n cleaned_lines = []\n for line in lines:\n words = line.split()\n digits = [word for word in words if any((i.isdigit() for i in word))]\n if remove_numeric_tables:\n if words and len(digits) / len(words) > 0.4 and (not line.strip().endswith('.')):\n logger.debug(\"Removing line '%s' from %s\", line, file_path)\n continue\n cleaned_lines.append(line)\n page = '\\n'.join(cleaned_lines)\n cleaned_pages.append(page)\n if valid_languages:\n document_text = ''.join(cleaned_pages)\n if not self.validate_language(document_text, valid_languages):\n logger.warning('The language for %s is not one of %s. The file may not have been decoded in the correct text format.', file_path, valid_languages)\n text = '\\x0c'.join(cleaned_pages)\n document = Document(content=text, meta={**parsed['metadata'], **(meta or {})}, id_hash_keys=id_hash_keys)\n return [document]\n", "source": "the_stack_v2_python_sparse", "source_path": "haystack/nodes/file_converter/tika.py", "source_repo": "deepset-ai/haystack", "split": "test", "star_events_count": 10599} {"blob_id": "189a98f81e391c25d36dce9f6e56fc9bae075046", "bodies": ["d = departmentmanage(self.driver)\nd.open_departmentmanage()\nself.assertEqual(d.verify(), True)\nd.modify_obj()\nself.assertEqual(d.sub_tagname(), '部门管理-修改')\nd.name_clear()\nd.add_department(Data.depname + 'Update', '')\nd.add_save()\nself.assertEqual(d.success(), True)\nfunction.screenshot(self.driver, 'modify_department_name.jpg')", "d = departmentmanage(self.driver)\nd.open_departmentmanage()\nself.assertEqual(d.verify(), True)\nd.modify_obj()\nself.assertEqual(d.sub_tagname(), '部门管理-修改')\nd.order_clear()\nd.add_department('', '111')\nd.add_save()\nself.assertEqual(d.success(), True)\nfunction.screenshot(self.driver, 'modify_department_order.jpg')", "d = departmentmanage(self.driver)\nd.open_departmentmanage()\nself.assertEqual(d.verify(), True)\nd.modify_obj()\nself.assertEqual(d.sub_tagname(), '部门管理-修改')\nd.change_dept()\nd.add_save()\nself.assertEqual(d.success(), True)\nfunction.screenshot(self.driver, 'modify_department_dept.jpg')", "d = departmentmanage(self.driver)\nd.open_departmentmanage()\nself.assertEqual(d.verify(), True)\nd.modify_obj()\nself.assertEqual(d.sub_tagname(), '部门管理-修改')\nd.add_back()\nself.assertEqual(d.verify(), True)\nfunction.screenshot(self.driver, 'modify_department_back.jpg')"], "bodies_text": "<|body_start_0|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.name_clear()\n d.add_department(Data.depname + 'Update', '')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_name.jpg')\n<|end_body_0|>\n\n<|body_start_1|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.order_clear()\n d.add_department('', '111')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_order.jpg')\n<|end_body_1|>\n\n<|body_start_2|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.change_dept()\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_dept.jpg')\n<|end_body_2|>\n\n<|body_start_3|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.add_back()\n self.assertEqual(d.verify(), True)\n function.screenshot(self.driver, 'modify_department_back.jpg')\n<|end_body_3|>\n", "class_docstring": "", "class_name": "Test028_Department_Modify_P1", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Test028_Department_Modify_P1:\n\n def test_modify_name(self):\n \"\"\"修改部门名称\"\"\"\n <|body_0|>\n\n def test_modify_number(self):\n \"\"\"修改排序号\"\"\"\n <|body_1|>\n\n def test_modify_dept(self):\n \"\"\"修改上级部门\"\"\"\n <|body_2|>\n\n def test_modify_back(self):\n \"\"\"修改部门并返回\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.name_clear()\n d.add_department(Data.depname + 'Update', '')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_name.jpg')\n<|end_body_0|>\n\n<|body_start_1|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.order_clear()\n d.add_department('', '111')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_order.jpg')\n<|end_body_1|>\n\n<|body_start_2|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.change_dept()\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_dept.jpg')\n<|end_body_2|>\n\n<|body_start_3|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.add_back()\n self.assertEqual(d.verify(), True)\n function.screenshot(self.driver, 'modify_department_back.jpg')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000435", "length_bytes": 2105, "license_type": "no_license", "methods": [{"docstring": "修改部门名称", "name": "test_modify_name", "signature": "def test_modify_name(self)"}, {"docstring": "修改排序号", "name": "test_modify_number", "signature": "def test_modify_number(self)"}, {"docstring": "修改上级部门", "name": "test_modify_dept", "signature": "def test_modify_dept(self)"}, {"docstring": "修改部门并返回", "name": "test_modify_back", "signature": "def test_modify_back(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_027460", "prompt": "Implement the Python class `Test028_Department_Modify_P1` described below.\n\nClass description:\nImplement the Test028_Department_Modify_P1 class.\n\nMethod signatures and docstrings:\n- def test_modify_name(self): 修改部门名称\n- def test_modify_number(self): 修改排序号\n- def test_modify_dept(self): 修改上级部门\n- def test_modify_back(self): 修改部门并返回", "prompted_full_text": "Implement the Python class `Test028_Department_Modify_P1` described below.\n\nClass description:\nImplement the Test028_Department_Modify_P1 class.\n\nMethod signatures and docstrings:\n- def test_modify_name(self): 修改部门名称\n- def test_modify_number(self): 修改排序号\n- def test_modify_dept(self): 修改上级部门\n- def test_modify_back(self): 修改部门并返回\n\n<|skeleton|>\nclass Test028_Department_Modify_P1:\n\n def test_modify_name(self):\n \"\"\"修改部门名称\"\"\"\n <|body_0|>\n\n def test_modify_number(self):\n \"\"\"修改排序号\"\"\"\n <|body_1|>\n\n def test_modify_dept(self):\n \"\"\"修改上级部门\"\"\"\n <|body_2|>\n\n def test_modify_back(self):\n \"\"\"修改部门并返回\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.name_clear()\n d.add_department(Data.depname + 'Update', '')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_name.jpg')\n<|end_body_0|>\n\n<|body_start_1|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.order_clear()\n d.add_department('', '111')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_order.jpg')\n<|end_body_1|>\n\n<|body_start_2|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.change_dept()\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_dept.jpg')\n<|end_body_2|>\n\n<|body_start_3|>\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.add_back()\n self.assertEqual(d.verify(), True)\n function.screenshot(self.driver, 'modify_department_back.jpg')\n<|end_body_3|>\n", "revision_id": "6f42c25249fc642cecc270578a180820988d45b5", "skeleton": "<|skeleton|>\nclass Test028_Department_Modify_P1:\n\n def test_modify_name(self):\n \"\"\"修改部门名称\"\"\"\n <|body_0|>\n\n def test_modify_number(self):\n \"\"\"修改排序号\"\"\"\n <|body_1|>\n\n def test_modify_dept(self):\n \"\"\"修改上级部门\"\"\"\n <|body_2|>\n\n def test_modify_back(self):\n \"\"\"修改部门并返回\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Test028_Department_Modify_P1:\n def test_modify_name(self):\n \"\"\"修改部门名称\"\"\"\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.name_clear()\n d.add_department(Data.depname + 'Update', '')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_name.jpg')\n\n def test_modify_number(self):\n \"\"\"修改排序号\"\"\"\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.order_clear()\n d.add_department('', '111')\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_order.jpg')\n\n def test_modify_dept(self):\n \"\"\"修改上级部门\"\"\"\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.change_dept()\n d.add_save()\n self.assertEqual(d.success(), True)\n function.screenshot(self.driver, 'modify_department_dept.jpg')\n\n def test_modify_back(self):\n \"\"\"修改部门并返回\"\"\"\n d = departmentmanage(self.driver)\n d.open_departmentmanage()\n self.assertEqual(d.verify(), True)\n d.modify_obj()\n self.assertEqual(d.sub_tagname(), '部门管理-修改')\n d.add_back()\n self.assertEqual(d.verify(), True)\n function.screenshot(self.driver, 'modify_department_back.jpg')\n", "source": "the_stack_v2_python_sparse", "source_path": "GlxssLive_web/TestCase/Manage_Department/Test028_department_modify_P1.py", "source_repo": "rrmiracle/GlxssLive", "split": "test", "star_events_count": 0} {"blob_id": "3224c0598ce3b6cbd2dd18d804f8c3ec6cc792c4", "bodies": ["super(SI_SNR, self).__init__()\nself.eps = eps\nself.pit = pit", "B, C, S = Y.size()\nzero_mean_target = Y - torch.mean(Y, dim=-1, keepdim=True)\nzero_mean_estimate = Y_ - torch.mean(Y_, dim=-1, keepdim=True)\ns_target = torch.unsqueeze(zero_mean_target, dim=1)\ns_estimate = torch.unsqueeze(zero_mean_estimate, dim=2)\npair_wise_dot = torch.sum(s_estimate * s_target, dim=3, keepdim=True)\ns_target_energy = torch.sum(s_target ** 2, dim=3, keepdim=True) + self.eps\npair_wise_proj = pair_wise_dot * s_target / s_target_energy\ne_noise = s_estimate - pair_wise_proj\npair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=3) / (torch.sum(e_noise ** 2, dim=3) + self.eps)\npair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + self.eps)\nif self.pit:\n perms = Y.new_tensor(list(permutations(range(C)))).long()\n index = torch.unsqueeze(perms, 2)\n perms_one_hot = Y.new_zeros((*perms.size(), C)).scatter_(2, index, 1)\n snr_set = torch.einsum('bij,pij->bp', [pair_wise_si_snr, perms_one_hot])\n max_snr_idx = torch.argmax(snr_set, dim=1)\n max_snr, _ = torch.max(snr_set, dim=1)\n max_snr /= C\nsi_snr = max_snr if self.pit else pair_wise_si_snr\nloss = 0 - torch.mean(si_snr)\nreturn loss"], "bodies_text": "<|body_start_0|>\n super(SI_SNR, self).__init__()\n self.eps = eps\n self.pit = pit\n<|end_body_0|>\n\n<|body_start_1|>\n B, C, S = Y.size()\n zero_mean_target = Y - torch.mean(Y, dim=-1, keepdim=True)\n zero_mean_estimate = Y_ - torch.mean(Y_, dim=-1, keepdim=True)\n s_target = torch.unsqueeze(zero_mean_target, dim=1)\n s_estimate = torch.unsqueeze(zero_mean_estimate, dim=2)\n pair_wise_dot = torch.sum(s_estimate * s_target, dim=3, keepdim=True)\n s_target_energy = torch.sum(s_target ** 2, dim=3, keepdim=True) + self.eps\n pair_wise_proj = pair_wise_dot * s_target / s_target_energy\n e_noise = s_estimate - pair_wise_proj\n pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=3) / (torch.sum(e_noise ** 2, dim=3) + self.eps)\n pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + self.eps)\n if self.pit:\n perms = Y.new_tensor(list(permutations(range(C)))).long()\n index = torch.unsqueeze(perms, 2)\n perms_one_hot = Y.new_zeros((*perms.size(), C)).scatter_(2, index, 1)\n snr_set = torch.einsum('bij,pij->bp', [pair_wise_si_snr, perms_one_hot])\n max_snr_idx = torch.argmax(snr_set, dim=1)\n max_snr, _ = torch.max(snr_set, dim=1)\n max_snr /= C\n si_snr = max_snr if self.pit else pair_wise_si_snr\n loss = 0 - torch.mean(si_snr)\n return loss\n<|end_body_1|>\n", "class_docstring": "Scale Invariant Signal to Noise Ratio with support for PIT Training Adapted from: - https://github.com/kaituoxu/Conv-TasNet Attributes: eps {float} -- epsilon to avoid 0 division pit {bool} -- use pit training https://arxiv.org/abs/1607.00325", "class_name": "SI_SNR", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SI_SNR:\n \"\"\"Scale Invariant Signal to Noise Ratio with support for PIT Training Adapted from: - https://github.com/kaituoxu/Conv-TasNet Attributes: eps {float} -- epsilon to avoid 0 division pit {bool} -- use pit training https://arxiv.org/abs/1607.00325\"\"\"\n\n def __init__(self: 'SI_SNR', eps: float=1e-08, pit: bool=False) -> None:\n \"\"\"Initialization Keyword Arguments: eps {float} -- epsilon to avoid 0 division (default: {1e-8}) pit {bool} -- use pit training (default: {False})\"\"\"\n <|body_0|>\n\n def forward(self: 'SI_SNR', Y_: torch.Tensor, Y: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward Pass zero-mean prior to calculation: s_target = (<ŝ,s>.s) / ||s||² e_noise = ŝ - s_target si_snr = 10 torch.log10(||s_target||² / ||e_noise||²) Arguments: Y_ {torch.Tensor} -- estimated source separation input tensor Y {torch.Tensor} -- target source separation input tensor Returns: torch.Tensor -- si snr output loss tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SI_SNR, self).__init__()\n self.eps = eps\n self.pit = pit\n<|end_body_0|>\n\n<|body_start_1|>\n B, C, S = Y.size()\n zero_mean_target = Y - torch.mean(Y, dim=-1, keepdim=True)\n zero_mean_estimate = Y_ - torch.mean(Y_, dim=-1, keepdim=True)\n s_target = torch.unsqueeze(zero_mean_target, dim=1)\n s_estimate = torch.unsqueeze(zero_mean_estimate, dim=2)\n pair_wise_dot = torch.sum(s_estimate * s_target, dim=3, keepdim=True)\n s_target_energy = torch.sum(s_target ** 2, dim=3, keepdim=True) + self.eps\n pair_wise_proj = pair_wise_dot * s_target / s_target_energy\n e_noise = s_estimate - pair_wise_proj\n pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=3) / (torch.sum(e_noise ** 2, dim=3) + self.eps)\n pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + self.eps)\n if self.pit:\n perms = Y.new_tensor(list(permutations(range(C)))).long()\n index = torch.unsqueeze(perms, 2)\n perms_one_hot = Y.new_zeros((*perms.size(), C)).scatter_(2, index, 1)\n snr_set = torch.einsum('bij,pij->bp', [pair_wise_si_snr, perms_one_hot])\n max_snr_idx = torch.argmax(snr_set, dim=1)\n max_snr, _ = torch.max(snr_set, dim=1)\n max_snr /= C\n si_snr = max_snr if self.pit else pair_wise_si_snr\n loss = 0 - torch.mean(si_snr)\n return loss\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000436", "length_bytes": 3577, "license_type": "permissive", "methods": [{"docstring": "Initialization Keyword Arguments: eps {float} -- epsilon to avoid 0 division (default: {1e-8}) pit {bool} -- use pit training (default: {False})", "name": "__init__", "signature": "def __init__(self: 'SI_SNR', eps: float=1e-08, pit: bool=False) -> None"}, {"docstring": "Forward Pass zero-mean prior to calculation: s_target = (<ŝ,s>.s) / ||s||² e_noise = ŝ - s_target si_snr = 10 torch.log10(||s_target||² / ||e_noise||²) Arguments: Y_ {torch.Tensor} -- estimated source separation input tensor Y {torch.Tensor} -- target source separation input tensor Returns: torch.Tensor -- si snr output loss tensor", "name": "forward", "signature": "def forward(self: 'SI_SNR', Y_: torch.Tensor, Y: torch.Tensor) -> torch.Tensor"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_046566", "prompt": "Implement the Python class `SI_SNR` described below.\n\nClass description:\nScale Invariant Signal to Noise Ratio with support for PIT Training Adapted from: - https://github.com/kaituoxu/Conv-TasNet Attributes: eps {float} -- epsilon to avoid 0 division pit {bool} -- use pit training https://arxiv.org/abs/1607.00325\n\nMethod signatures and docstrings:\n- def __init__(self: 'SI_SNR', eps: float=1e-08, pit: bool=False) -> None: Initialization Keyword Arguments: eps {float} -- epsilon to avoid 0 division (default: {1e-8}) pit {bool} -- use pit training (default: {False})\n- def forward(self: 'SI_SNR', Y_: torch.Tensor, Y: torch.Tensor) -> torch.Tensor: Forward Pass zero-mean prior to calculation: s_target = (<ŝ,s>.s) / ||s||² e_noise = ŝ - s_target si_snr = 10 torch.log10(||s_target||² / ||e_noise||²) Arguments: Y_ {torch.Tensor} -- estimated source separation input tensor Y {torch.Tensor} -- target source separation input tensor Returns: torch.Tensor -- si snr output loss tensor", "prompted_full_text": "Implement the Python class `SI_SNR` described below.\n\nClass description:\nScale Invariant Signal to Noise Ratio with support for PIT Training Adapted from: - https://github.com/kaituoxu/Conv-TasNet Attributes: eps {float} -- epsilon to avoid 0 division pit {bool} -- use pit training https://arxiv.org/abs/1607.00325\n\nMethod signatures and docstrings:\n- def __init__(self: 'SI_SNR', eps: float=1e-08, pit: bool=False) -> None: Initialization Keyword Arguments: eps {float} -- epsilon to avoid 0 division (default: {1e-8}) pit {bool} -- use pit training (default: {False})\n- def forward(self: 'SI_SNR', Y_: torch.Tensor, Y: torch.Tensor) -> torch.Tensor: Forward Pass zero-mean prior to calculation: s_target = (<ŝ,s>.s) / ||s||² e_noise = ŝ - s_target si_snr = 10 torch.log10(||s_target||² / ||e_noise||²) Arguments: Y_ {torch.Tensor} -- estimated source separation input tensor Y {torch.Tensor} -- target source separation input tensor Returns: torch.Tensor -- si snr output loss tensor\n\n<|skeleton|>\nclass SI_SNR:\n \"\"\"Scale Invariant Signal to Noise Ratio with support for PIT Training Adapted from: - https://github.com/kaituoxu/Conv-TasNet Attributes: eps {float} -- epsilon to avoid 0 division pit {bool} -- use pit training https://arxiv.org/abs/1607.00325\"\"\"\n\n def __init__(self: 'SI_SNR', eps: float=1e-08, pit: bool=False) -> None:\n \"\"\"Initialization Keyword Arguments: eps {float} -- epsilon to avoid 0 division (default: {1e-8}) pit {bool} -- use pit training (default: {False})\"\"\"\n <|body_0|>\n\n def forward(self: 'SI_SNR', Y_: torch.Tensor, Y: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward Pass zero-mean prior to calculation: s_target = (<ŝ,s>.s) / ||s||² e_noise = ŝ - s_target si_snr = 10 torch.log10(||s_target||² / ||e_noise||²) Arguments: Y_ {torch.Tensor} -- estimated source separation input tensor Y {torch.Tensor} -- target source separation input tensor Returns: torch.Tensor -- si snr output loss tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SI_SNR, self).__init__()\n self.eps = eps\n self.pit = pit\n<|end_body_0|>\n\n<|body_start_1|>\n B, C, S = Y.size()\n zero_mean_target = Y - torch.mean(Y, dim=-1, keepdim=True)\n zero_mean_estimate = Y_ - torch.mean(Y_, dim=-1, keepdim=True)\n s_target = torch.unsqueeze(zero_mean_target, dim=1)\n s_estimate = torch.unsqueeze(zero_mean_estimate, dim=2)\n pair_wise_dot = torch.sum(s_estimate * s_target, dim=3, keepdim=True)\n s_target_energy = torch.sum(s_target ** 2, dim=3, keepdim=True) + self.eps\n pair_wise_proj = pair_wise_dot * s_target / s_target_energy\n e_noise = s_estimate - pair_wise_proj\n pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=3) / (torch.sum(e_noise ** 2, dim=3) + self.eps)\n pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + self.eps)\n if self.pit:\n perms = Y.new_tensor(list(permutations(range(C)))).long()\n index = torch.unsqueeze(perms, 2)\n perms_one_hot = Y.new_zeros((*perms.size(), C)).scatter_(2, index, 1)\n snr_set = torch.einsum('bij,pij->bp', [pair_wise_si_snr, perms_one_hot])\n max_snr_idx = torch.argmax(snr_set, dim=1)\n max_snr, _ = torch.max(snr_set, dim=1)\n max_snr /= C\n si_snr = max_snr if self.pit else pair_wise_si_snr\n loss = 0 - torch.mean(si_snr)\n return loss\n<|end_body_1|>\n", "revision_id": "2415502fa8a38d4624b1c71e926f1723bdc8535c", "skeleton": "<|skeleton|>\nclass SI_SNR:\n \"\"\"Scale Invariant Signal to Noise Ratio with support for PIT Training Adapted from: - https://github.com/kaituoxu/Conv-TasNet Attributes: eps {float} -- epsilon to avoid 0 division pit {bool} -- use pit training https://arxiv.org/abs/1607.00325\"\"\"\n\n def __init__(self: 'SI_SNR', eps: float=1e-08, pit: bool=False) -> None:\n \"\"\"Initialization Keyword Arguments: eps {float} -- epsilon to avoid 0 division (default: {1e-8}) pit {bool} -- use pit training (default: {False})\"\"\"\n <|body_0|>\n\n def forward(self: 'SI_SNR', Y_: torch.Tensor, Y: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward Pass zero-mean prior to calculation: s_target = (<ŝ,s>.s) / ||s||² e_noise = ŝ - s_target si_snr = 10 torch.log10(||s_target||² / ||e_noise||²) Arguments: Y_ {torch.Tensor} -- estimated source separation input tensor Y {torch.Tensor} -- target source separation input tensor Returns: torch.Tensor -- si snr output loss tensor\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SI_SNR:\n \"\"\"Scale Invariant Signal to Noise Ratio with support for PIT Training Adapted from: - https://github.com/kaituoxu/Conv-TasNet Attributes: eps {float} -- epsilon to avoid 0 division pit {bool} -- use pit training https://arxiv.org/abs/1607.00325\"\"\"\n\n def __init__(self: 'SI_SNR', eps: float=1e-08, pit: bool=False) -> None:\n \"\"\"Initialization Keyword Arguments: eps {float} -- epsilon to avoid 0 division (default: {1e-8}) pit {bool} -- use pit training (default: {False})\"\"\"\n super(SI_SNR, self).__init__()\n self.eps = eps\n self.pit = pit\n\n def forward(self: 'SI_SNR', Y_: torch.Tensor, Y: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward Pass zero-mean prior to calculation: s_target = (<ŝ,s>.s) / ||s||² e_noise = ŝ - s_target si_snr = 10 torch.log10(||s_target||² / ||e_noise||²) Arguments: Y_ {torch.Tensor} -- estimated source separation input tensor Y {torch.Tensor} -- target source separation input tensor Returns: torch.Tensor -- si snr output loss tensor\"\"\"\n B, C, S = Y.size()\n zero_mean_target = Y - torch.mean(Y, dim=-1, keepdim=True)\n zero_mean_estimate = Y_ - torch.mean(Y_, dim=-1, keepdim=True)\n s_target = torch.unsqueeze(zero_mean_target, dim=1)\n s_estimate = torch.unsqueeze(zero_mean_estimate, dim=2)\n pair_wise_dot = torch.sum(s_estimate * s_target, dim=3, keepdim=True)\n s_target_energy = torch.sum(s_target ** 2, dim=3, keepdim=True) + self.eps\n pair_wise_proj = pair_wise_dot * s_target / s_target_energy\n e_noise = s_estimate - pair_wise_proj\n pair_wise_si_snr = torch.sum(pair_wise_proj ** 2, dim=3) / (torch.sum(e_noise ** 2, dim=3) + self.eps)\n pair_wise_si_snr = 10 * torch.log10(pair_wise_si_snr + self.eps)\n if self.pit:\n perms = Y.new_tensor(list(permutations(range(C)))).long()\n index = torch.unsqueeze(perms, 2)\n perms_one_hot = Y.new_zeros((*perms.size(), C)).scatter_(2, index, 1)\n snr_set = torch.einsum('bij,pij->bp', [pair_wise_si_snr, perms_one_hot])\n max_snr_idx = torch.argmax(snr_set, dim=1)\n max_snr, _ = torch.max(snr_set, dim=1)\n max_snr /= C\n si_snr = max_snr if self.pit else pair_wise_si_snr\n loss = 0 - torch.mean(si_snr)\n return loss\n", "source": "the_stack_v2_python_sparse", "source_path": "SPK_SP_Master/wass/convtasnet/loss.py", "source_repo": "adamwhitakerwilson/speaker_separation", "split": "test", "star_events_count": 0} {"blob_id": "74a626bfc88798c76fea7e0b97571392adb97d03", "bodies": ["super(DeactivateReactivateBlockStorage, cls).setUpClass()\ncls.server = cls.compute.servers.behaviors.create_active_server().entity\ncls.image = cls.compute.images.behaviors.create_active_image(cls.server.id).entity\ncls.resources.add(cls.server.id, cls.compute.servers.client.delete_server)\ncls.resources.add(cls.image.id, cls.images_client.delete_image)", "self.resp = self.images_admin.client.deactivate_image(self.image.id)\nself.assertEqual(204, self.resp.status_code)\nresp = self.volumes.client.create_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\nself.assertEqual(400, resp.status_code)", "resp = self.images_admin.client.reactivate_image(self.image.id)\nself.assertEqual(204, resp.status_code)\ncreated_volume = self.volumes.behaviors.create_available_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\nself.resources.add(created_volume.id_, self.volumes.client.delete_volume)\nself.assertImageToVolumeCopySucceeded(created_volume.id_, self.volumes.config.min_volume_from_image_size)"], "bodies_text": "<|body_start_0|>\n super(DeactivateReactivateBlockStorage, cls).setUpClass()\n cls.server = cls.compute.servers.behaviors.create_active_server().entity\n cls.image = cls.compute.images.behaviors.create_active_image(cls.server.id).entity\n cls.resources.add(cls.server.id, cls.compute.servers.client.delete_server)\n cls.resources.add(cls.image.id, cls.images_client.delete_image)\n<|end_body_0|>\n\n<|body_start_1|>\n self.resp = self.images_admin.client.deactivate_image(self.image.id)\n self.assertEqual(204, self.resp.status_code)\n resp = self.volumes.client.create_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.assertEqual(400, resp.status_code)\n<|end_body_1|>\n\n<|body_start_2|>\n resp = self.images_admin.client.reactivate_image(self.image.id)\n self.assertEqual(204, resp.status_code)\n created_volume = self.volumes.behaviors.create_available_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.resources.add(created_volume.id_, self.volumes.client.delete_volume)\n self.assertImageToVolumeCopySucceeded(created_volume.id_, self.volumes.config.min_volume_from_image_size)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "DeactivateReactivateBlockStorage", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DeactivateReactivateBlockStorage:\n\n def setUpClass(cls):\n \"\"\"Perform actions that setup the necessary resources for testing The following resources are created during this setup: - A server with defaults defined in server behaviors - An image from the newly created server The following data is generated during this set up: - Get compute integration composite\"\"\"\n <|body_0|>\n\n def test_create_volume_from_deactivated_image_invalid(self):\n \"\"\"Verify that a volume cannot be created from a deactivated image Attempt to create a volume using a deactivated image This test will be successful if: - The response code received for deactivate image is a 204 - The response code received for create volume is a 400\"\"\"\n <|body_1|>\n\n def test_create_volume_from_reactivated_image(self):\n \"\"\"Verify that a volume can be created from a reactivated image Create a volume using a reactivated image This test will be successful if: - The response code received for reactivate image is a 204 - The volume status is available\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DeactivateReactivateBlockStorage, cls).setUpClass()\n cls.server = cls.compute.servers.behaviors.create_active_server().entity\n cls.image = cls.compute.images.behaviors.create_active_image(cls.server.id).entity\n cls.resources.add(cls.server.id, cls.compute.servers.client.delete_server)\n cls.resources.add(cls.image.id, cls.images_client.delete_image)\n<|end_body_0|>\n\n<|body_start_1|>\n self.resp = self.images_admin.client.deactivate_image(self.image.id)\n self.assertEqual(204, self.resp.status_code)\n resp = self.volumes.client.create_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.assertEqual(400, resp.status_code)\n<|end_body_1|>\n\n<|body_start_2|>\n resp = self.images_admin.client.reactivate_image(self.image.id)\n self.assertEqual(204, resp.status_code)\n created_volume = self.volumes.behaviors.create_available_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.resources.add(created_volume.id_, self.volumes.client.delete_volume)\n self.assertImageToVolumeCopySucceeded(created_volume.id_, self.volumes.config.min_volume_from_image_size)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000437", "length_bytes": 3822, "license_type": "permissive", "methods": [{"docstring": "Perform actions that setup the necessary resources for testing The following resources are created during this setup: - A server with defaults defined in server behaviors - An image from the newly created server The following data is generated during this set up: - Get compute integration composite", "name": "setUpClass", "signature": "def setUpClass(cls)"}, {"docstring": "Verify that a volume cannot be created from a deactivated image Attempt to create a volume using a deactivated image This test will be successful if: - The response code received for deactivate image is a 204 - The response code received for create volume is a 400", "name": "test_create_volume_from_deactivated_image_invalid", "signature": "def test_create_volume_from_deactivated_image_invalid(self)"}, {"docstring": "Verify that a volume can be created from a reactivated image Create a volume using a reactivated image This test will be successful if: - The response code received for reactivate image is a 204 - The volume status is available", "name": "test_create_volume_from_reactivated_image", "signature": "def test_create_volume_from_reactivated_image(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_048520", "prompt": "Implement the Python class `DeactivateReactivateBlockStorage` described below.\n\nClass description:\nImplement the DeactivateReactivateBlockStorage class.\n\nMethod signatures and docstrings:\n- def setUpClass(cls): Perform actions that setup the necessary resources for testing The following resources are created during this setup: - A server with defaults defined in server behaviors - An image from the newly created server The following data is generated during this set up: - Get compute integration composite\n- def test_create_volume_from_deactivated_image_invalid(self): Verify that a volume cannot be created from a deactivated image Attempt to create a volume using a deactivated image This test will be successful if: - The response code received for deactivate image is a 204 - The response code received for create volume is a 400\n- def test_create_volume_from_reactivated_image(self): Verify that a volume can be created from a reactivated image Create a volume using a reactivated image This test will be successful if: - The response code received for reactivate image is a 204 - The volume status is available", "prompted_full_text": "Implement the Python class `DeactivateReactivateBlockStorage` described below.\n\nClass description:\nImplement the DeactivateReactivateBlockStorage class.\n\nMethod signatures and docstrings:\n- def setUpClass(cls): Perform actions that setup the necessary resources for testing The following resources are created during this setup: - A server with defaults defined in server behaviors - An image from the newly created server The following data is generated during this set up: - Get compute integration composite\n- def test_create_volume_from_deactivated_image_invalid(self): Verify that a volume cannot be created from a deactivated image Attempt to create a volume using a deactivated image This test will be successful if: - The response code received for deactivate image is a 204 - The response code received for create volume is a 400\n- def test_create_volume_from_reactivated_image(self): Verify that a volume can be created from a reactivated image Create a volume using a reactivated image This test will be successful if: - The response code received for reactivate image is a 204 - The volume status is available\n\n<|skeleton|>\nclass DeactivateReactivateBlockStorage:\n\n def setUpClass(cls):\n \"\"\"Perform actions that setup the necessary resources for testing The following resources are created during this setup: - A server with defaults defined in server behaviors - An image from the newly created server The following data is generated during this set up: - Get compute integration composite\"\"\"\n <|body_0|>\n\n def test_create_volume_from_deactivated_image_invalid(self):\n \"\"\"Verify that a volume cannot be created from a deactivated image Attempt to create a volume using a deactivated image This test will be successful if: - The response code received for deactivate image is a 204 - The response code received for create volume is a 400\"\"\"\n <|body_1|>\n\n def test_create_volume_from_reactivated_image(self):\n \"\"\"Verify that a volume can be created from a reactivated image Create a volume using a reactivated image This test will be successful if: - The response code received for reactivate image is a 204 - The volume status is available\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DeactivateReactivateBlockStorage, cls).setUpClass()\n cls.server = cls.compute.servers.behaviors.create_active_server().entity\n cls.image = cls.compute.images.behaviors.create_active_image(cls.server.id).entity\n cls.resources.add(cls.server.id, cls.compute.servers.client.delete_server)\n cls.resources.add(cls.image.id, cls.images_client.delete_image)\n<|end_body_0|>\n\n<|body_start_1|>\n self.resp = self.images_admin.client.deactivate_image(self.image.id)\n self.assertEqual(204, self.resp.status_code)\n resp = self.volumes.client.create_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.assertEqual(400, resp.status_code)\n<|end_body_1|>\n\n<|body_start_2|>\n resp = self.images_admin.client.reactivate_image(self.image.id)\n self.assertEqual(204, resp.status_code)\n created_volume = self.volumes.behaviors.create_available_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.resources.add(created_volume.id_, self.volumes.client.delete_volume)\n self.assertImageToVolumeCopySucceeded(created_volume.id_, self.volumes.config.min_volume_from_image_size)\n<|end_body_2|>\n", "revision_id": "30f0e64672676c3f90b4a582fe90fac6621475b3", "skeleton": "<|skeleton|>\nclass DeactivateReactivateBlockStorage:\n\n def setUpClass(cls):\n \"\"\"Perform actions that setup the necessary resources for testing The following resources are created during this setup: - A server with defaults defined in server behaviors - An image from the newly created server The following data is generated during this set up: - Get compute integration composite\"\"\"\n <|body_0|>\n\n def test_create_volume_from_deactivated_image_invalid(self):\n \"\"\"Verify that a volume cannot be created from a deactivated image Attempt to create a volume using a deactivated image This test will be successful if: - The response code received for deactivate image is a 204 - The response code received for create volume is a 400\"\"\"\n <|body_1|>\n\n def test_create_volume_from_reactivated_image(self):\n \"\"\"Verify that a volume can be created from a reactivated image Create a volume using a reactivated image This test will be successful if: - The response code received for reactivate image is a 204 - The volume status is available\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DeactivateReactivateBlockStorage:\n def setUpClass(cls):\n \"\"\"Perform actions that setup the necessary resources for testing The following resources are created during this setup: - A server with defaults defined in server behaviors - An image from the newly created server The following data is generated during this set up: - Get compute integration composite\"\"\"\n super(DeactivateReactivateBlockStorage, cls).setUpClass()\n cls.server = cls.compute.servers.behaviors.create_active_server().entity\n cls.image = cls.compute.images.behaviors.create_active_image(cls.server.id).entity\n cls.resources.add(cls.server.id, cls.compute.servers.client.delete_server)\n cls.resources.add(cls.image.id, cls.images_client.delete_image)\n\n def test_create_volume_from_deactivated_image_invalid(self):\n \"\"\"Verify that a volume cannot be created from a deactivated image Attempt to create a volume using a deactivated image This test will be successful if: - The response code received for deactivate image is a 204 - The response code received for create volume is a 400\"\"\"\n self.resp = self.images_admin.client.deactivate_image(self.image.id)\n self.assertEqual(204, self.resp.status_code)\n resp = self.volumes.client.create_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.assertEqual(400, resp.status_code)\n\n def test_create_volume_from_reactivated_image(self):\n \"\"\"Verify that a volume can be created from a reactivated image Create a volume using a reactivated image This test will be successful if: - The response code received for reactivate image is a 204 - The volume status is available\"\"\"\n resp = self.images_admin.client.reactivate_image(self.image.id)\n self.assertEqual(204, resp.status_code)\n created_volume = self.volumes.behaviors.create_available_volume(size=self.volumes.config.min_volume_from_image_size, volume_type=self.volumes.config.default_volume_type, image_ref=self.image.id)\n self.resources.add(created_volume.id_, self.volumes.client.delete_volume)\n self.assertImageToVolumeCopySucceeded(created_volume.id_, self.volumes.config.min_volume_from_image_size)\n", "source": "the_stack_v2_python_sparse", "source_path": "cloudroast/glance/integration/blockstorage/deactivate_reactivate_block_storage_test.py", "source_repo": "RULCSoft/cloudroast", "split": "test", "star_events_count": 1} {"blob_id": "ef28dba2899646192d7caf2ae8c731db744294aa", "bodies": ["specs = super().getInputSpecification()\nspecs.name = 'logtransformer'\nspecs.description = 'applies the natural logarithm to the data and inverts by applying the\\n exponential function.'\nreturn specs", "for t, (target, data) in enumerate(params.items()):\n if np.any(initial[:, t] <= 0):\n raise ValueError(f'Log transformation requires strictly positive values, and negative values were found in target \"{target}\"! If negative values were expected, perhaps an ArcsinhTransformer would be more appropriate?')\nreturn super().getResidual(initial, params, pivot, settings)"], "bodies_text": "<|body_start_0|>\n specs = super().getInputSpecification()\n specs.name = 'logtransformer'\n specs.description = 'applies the natural logarithm to the data and inverts by applying the\\n exponential function.'\n return specs\n<|end_body_0|>\n\n<|body_start_1|>\n for t, (target, data) in enumerate(params.items()):\n if np.any(initial[:, t] <= 0):\n raise ValueError(f'Log transformation requires strictly positive values, and negative values were found in target \"{target}\"! If negative values were expected, perhaps an ArcsinhTransformer would be more appropriate?')\n return super().getResidual(initial, params, pivot, settings)\n<|end_body_1|>\n", "class_docstring": "Wrapper of scikit-learn's FunctionTransformer for np.log/np.exp", "class_name": "LogTransformer", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "BSD-2-Clause", "BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass LogTransformer:\n \"\"\"Wrapper of scikit-learn's FunctionTransformer for np.log/np.exp\"\"\"\n\n def getInputSpecification(cls):\n \"\"\"Method to get a reference to a class that specifies the input data for class cls. @ In, None @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.\"\"\"\n <|body_0|>\n\n def getResidual(self, initial, params, pivot, settings):\n \"\"\"Removes trained signal from data and find residual @ In, initial, np.array, original signal shaped [pivotValues, targets], targets MUST be in same order as self.target @ In, params, dict, training parameters as from self.characterize @ In, pivot, np.array, time-like array values @ In, settings, dict, additional settings specific to algorithm @ Out, residual, np.array, reduced signal shaped [pivotValues, targets]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n specs = super().getInputSpecification()\n specs.name = 'logtransformer'\n specs.description = 'applies the natural logarithm to the data and inverts by applying the\\n exponential function.'\n return specs\n<|end_body_0|>\n\n<|body_start_1|>\n for t, (target, data) in enumerate(params.items()):\n if np.any(initial[:, t] <= 0):\n raise ValueError(f'Log transformation requires strictly positive values, and negative values were found in target \"{target}\"! If negative values were expected, perhaps an ArcsinhTransformer would be more appropriate?')\n return super().getResidual(initial, params, pivot, settings)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000438", "length_bytes": 6301, "license_type": "permissive", "methods": [{"docstring": "Method to get a reference to a class that specifies the input data for class cls. @ In, None @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.", "name": "getInputSpecification", "signature": "def getInputSpecification(cls)"}, {"docstring": "Removes trained signal from data and find residual @ In, initial, np.array, original signal shaped [pivotValues, targets], targets MUST be in same order as self.target @ In, params, dict, training parameters as from self.characterize @ In, pivot, np.array, time-like array values @ In, settings, dict, additional settings specific to algorithm @ Out, residual, np.array, reduced signal shaped [pivotValues, targets]", "name": "getResidual", "signature": "def getResidual(self, initial, params, pivot, settings)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_021319", "prompt": "Implement the Python class `LogTransformer` described below.\n\nClass description:\nWrapper of scikit-learn's FunctionTransformer for np.log/np.exp\n\nMethod signatures and docstrings:\n- def getInputSpecification(cls): Method to get a reference to a class that specifies the input data for class cls. @ In, None @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.\n- def getResidual(self, initial, params, pivot, settings): Removes trained signal from data and find residual @ In, initial, np.array, original signal shaped [pivotValues, targets], targets MUST be in same order as self.target @ In, params, dict, training parameters as from self.characterize @ In, pivot, np.array, time-like array values @ In, settings, dict, additional settings specific to algorithm @ Out, residual, np.array, reduced signal shaped [pivotValues, targets]", "prompted_full_text": "Implement the Python class `LogTransformer` described below.\n\nClass description:\nWrapper of scikit-learn's FunctionTransformer for np.log/np.exp\n\nMethod signatures and docstrings:\n- def getInputSpecification(cls): Method to get a reference to a class that specifies the input data for class cls. @ In, None @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.\n- def getResidual(self, initial, params, pivot, settings): Removes trained signal from data and find residual @ In, initial, np.array, original signal shaped [pivotValues, targets], targets MUST be in same order as self.target @ In, params, dict, training parameters as from self.characterize @ In, pivot, np.array, time-like array values @ In, settings, dict, additional settings specific to algorithm @ Out, residual, np.array, reduced signal shaped [pivotValues, targets]\n\n<|skeleton|>\nclass LogTransformer:\n \"\"\"Wrapper of scikit-learn's FunctionTransformer for np.log/np.exp\"\"\"\n\n def getInputSpecification(cls):\n \"\"\"Method to get a reference to a class that specifies the input data for class cls. @ In, None @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.\"\"\"\n <|body_0|>\n\n def getResidual(self, initial, params, pivot, settings):\n \"\"\"Removes trained signal from data and find residual @ In, initial, np.array, original signal shaped [pivotValues, targets], targets MUST be in same order as self.target @ In, params, dict, training parameters as from self.characterize @ In, pivot, np.array, time-like array values @ In, settings, dict, additional settings specific to algorithm @ Out, residual, np.array, reduced signal shaped [pivotValues, targets]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n specs = super().getInputSpecification()\n specs.name = 'logtransformer'\n specs.description = 'applies the natural logarithm to the data and inverts by applying the\\n exponential function.'\n return specs\n<|end_body_0|>\n\n<|body_start_1|>\n for t, (target, data) in enumerate(params.items()):\n if np.any(initial[:, t] <= 0):\n raise ValueError(f'Log transformation requires strictly positive values, and negative values were found in target \"{target}\"! If negative values were expected, perhaps an ArcsinhTransformer would be more appropriate?')\n return super().getResidual(initial, params, pivot, settings)\n<|end_body_1|>\n", "revision_id": "2b16e7aa3325fe84cab2477947a951414c635381", "skeleton": "<|skeleton|>\nclass LogTransformer:\n \"\"\"Wrapper of scikit-learn's FunctionTransformer for np.log/np.exp\"\"\"\n\n def getInputSpecification(cls):\n \"\"\"Method to get a reference to a class that specifies the input data for class cls. @ In, None @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.\"\"\"\n <|body_0|>\n\n def getResidual(self, initial, params, pivot, settings):\n \"\"\"Removes trained signal from data and find residual @ In, initial, np.array, original signal shaped [pivotValues, targets], targets MUST be in same order as self.target @ In, params, dict, training parameters as from self.characterize @ In, pivot, np.array, time-like array values @ In, settings, dict, additional settings specific to algorithm @ Out, residual, np.array, reduced signal shaped [pivotValues, targets]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class LogTransformer:\n \"\"\"Wrapper of scikit-learn's FunctionTransformer for np.log/np.exp\"\"\"\n\n def getInputSpecification(cls):\n \"\"\"Method to get a reference to a class that specifies the input data for class cls. @ In, None @ Out, specs, InputData.ParameterInput, class to use for specifying input of cls.\"\"\"\n specs = super().getInputSpecification()\n specs.name = 'logtransformer'\n specs.description = 'applies the natural logarithm to the data and inverts by applying the\\n exponential function.'\n return specs\n\n def getResidual(self, initial, params, pivot, settings):\n \"\"\"Removes trained signal from data and find residual @ In, initial, np.array, original signal shaped [pivotValues, targets], targets MUST be in same order as self.target @ In, params, dict, training parameters as from self.characterize @ In, pivot, np.array, time-like array values @ In, settings, dict, additional settings specific to algorithm @ Out, residual, np.array, reduced signal shaped [pivotValues, targets]\"\"\"\n for t, (target, data) in enumerate(params.items()):\n if np.any(initial[:, t] <= 0):\n raise ValueError(f'Log transformation requires strictly positive values, and negative values were found in target \"{target}\"! If negative values were expected, perhaps an ArcsinhTransformer would be more appropriate?')\n return super().getResidual(initial, params, pivot, settings)\n", "source": "the_stack_v2_python_sparse", "source_path": "ravenframework/TSA/Transformers/FunctionTransformers.py", "source_repo": "idaholab/raven", "split": "test", "star_events_count": 201} {"blob_id": "c8274dd4362f7f021dda4162a6454be59441ba2b", "bodies": ["func = lab4.squared\ncase1 = [1, 2, 3]\nexpected1 = [1, 4, 9]\nself.assertEqual(func(case1), expected1)", "func = lab4.check_title\ncase2 = ['Hello World', 'hi', 'Hello']\nexpected2 = ['Hello World', 'Hello']\nself.assertEqual(func(case2), expected2)", "func = lab4.restock_inventory\ncase3 = {'a': 10, 'b': 5, 'c': 7}\nexpected3 = {'a': 20, 'b': 15, 'c': 17}\nself.assertEqual(func(case3), expected3)", "func = lab4.filter_0_items\ncase4 = {'a': 10, 'b': 0, 'c': 7}\nexpected4 = {'a': 10, 'c': 7}\nself.assertEqual(func(case4), expected4)", "func = lab4.average_grades\ncase5 = {'Michael': [100, 78, 88, 900 / 10], 'Daniel': [80, 95, 77, 64.0], 'Josh': [99, 89, 94, 66]}\nexpected5 = {'Michael': 89.0, 'Daniel': 79.0, 'Josh': 87.0}\nself.assertEqual(func(case5), expected5)"], "bodies_text": "<|body_start_0|>\n func = lab4.squared\n case1 = [1, 2, 3]\n expected1 = [1, 4, 9]\n self.assertEqual(func(case1), expected1)\n<|end_body_0|>\n\n<|body_start_1|>\n func = lab4.check_title\n case2 = ['Hello World', 'hi', 'Hello']\n expected2 = ['Hello World', 'Hello']\n self.assertEqual(func(case2), expected2)\n<|end_body_1|>\n\n<|body_start_2|>\n func = lab4.restock_inventory\n case3 = {'a': 10, 'b': 5, 'c': 7}\n expected3 = {'a': 20, 'b': 15, 'c': 17}\n self.assertEqual(func(case3), expected3)\n<|end_body_2|>\n\n<|body_start_3|>\n func = lab4.filter_0_items\n case4 = {'a': 10, 'b': 0, 'c': 7}\n expected4 = {'a': 10, 'c': 7}\n self.assertEqual(func(case4), expected4)\n<|end_body_3|>\n\n<|body_start_4|>\n func = lab4.average_grades\n case5 = {'Michael': [100, 78, 88, 900 / 10], 'Daniel': [80, 95, 77, 64.0], 'Josh': [99, 89, 94, 66]}\n expected5 = {'Michael': 89.0, 'Daniel': 79.0, 'Josh': 87.0}\n self.assertEqual(func(case5), expected5)\n<|end_body_4|>\n", "class_docstring": "", "class_name": "Lab4Test", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Lab4Test:\n\n def test_squared(self):\n \"\"\"testing squared function\"\"\"\n <|body_0|>\n\n def test_check_title(self):\n \"\"\"testing check_title function\"\"\"\n <|body_1|>\n\n def test_restock_inventory(self):\n \"\"\"testing restock_inventory function\"\"\"\n <|body_2|>\n\n def test_filter_0_items(self):\n \"\"\"testing filter_0_items function\"\"\"\n <|body_3|>\n\n def test_average_grades(self):\n \"\"\"testing average_grades function\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n func = lab4.squared\n case1 = [1, 2, 3]\n expected1 = [1, 4, 9]\n self.assertEqual(func(case1), expected1)\n<|end_body_0|>\n\n<|body_start_1|>\n func = lab4.check_title\n case2 = ['Hello World', 'hi', 'Hello']\n expected2 = ['Hello World', 'Hello']\n self.assertEqual(func(case2), expected2)\n<|end_body_1|>\n\n<|body_start_2|>\n func = lab4.restock_inventory\n case3 = {'a': 10, 'b': 5, 'c': 7}\n expected3 = {'a': 20, 'b': 15, 'c': 17}\n self.assertEqual(func(case3), expected3)\n<|end_body_2|>\n\n<|body_start_3|>\n func = lab4.filter_0_items\n case4 = {'a': 10, 'b': 0, 'c': 7}\n expected4 = {'a': 10, 'c': 7}\n self.assertEqual(func(case4), expected4)\n<|end_body_3|>\n\n<|body_start_4|>\n func = lab4.average_grades\n case5 = {'Michael': [100, 78, 88, 900 / 10], 'Daniel': [80, 95, 77, 64.0], 'Josh': [99, 89, 94, 66]}\n expected5 = {'Michael': 89.0, 'Daniel': 79.0, 'Josh': 87.0}\n self.assertEqual(func(case5), expected5)\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000439", "length_bytes": 1643, "license_type": "no_license", "methods": [{"docstring": "testing squared function", "name": "test_squared", "signature": "def test_squared(self)"}, {"docstring": "testing check_title function", "name": "test_check_title", "signature": "def test_check_title(self)"}, {"docstring": "testing restock_inventory function", "name": "test_restock_inventory", "signature": "def test_restock_inventory(self)"}, {"docstring": "testing filter_0_items function", "name": "test_filter_0_items", "signature": "def test_filter_0_items(self)"}, {"docstring": "testing average_grades function", "name": "test_average_grades", "signature": "def test_average_grades(self)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_047819", "prompt": "Implement the Python class `Lab4Test` described below.\n\nClass description:\nImplement the Lab4Test class.\n\nMethod signatures and docstrings:\n- def test_squared(self): testing squared function\n- def test_check_title(self): testing check_title function\n- def test_restock_inventory(self): testing restock_inventory function\n- def test_filter_0_items(self): testing filter_0_items function\n- def test_average_grades(self): testing average_grades function", "prompted_full_text": "Implement the Python class `Lab4Test` described below.\n\nClass description:\nImplement the Lab4Test class.\n\nMethod signatures and docstrings:\n- def test_squared(self): testing squared function\n- def test_check_title(self): testing check_title function\n- def test_restock_inventory(self): testing restock_inventory function\n- def test_filter_0_items(self): testing filter_0_items function\n- def test_average_grades(self): testing average_grades function\n\n<|skeleton|>\nclass Lab4Test:\n\n def test_squared(self):\n \"\"\"testing squared function\"\"\"\n <|body_0|>\n\n def test_check_title(self):\n \"\"\"testing check_title function\"\"\"\n <|body_1|>\n\n def test_restock_inventory(self):\n \"\"\"testing restock_inventory function\"\"\"\n <|body_2|>\n\n def test_filter_0_items(self):\n \"\"\"testing filter_0_items function\"\"\"\n <|body_3|>\n\n def test_average_grades(self):\n \"\"\"testing average_grades function\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n func = lab4.squared\n case1 = [1, 2, 3]\n expected1 = [1, 4, 9]\n self.assertEqual(func(case1), expected1)\n<|end_body_0|>\n\n<|body_start_1|>\n func = lab4.check_title\n case2 = ['Hello World', 'hi', 'Hello']\n expected2 = ['Hello World', 'Hello']\n self.assertEqual(func(case2), expected2)\n<|end_body_1|>\n\n<|body_start_2|>\n func = lab4.restock_inventory\n case3 = {'a': 10, 'b': 5, 'c': 7}\n expected3 = {'a': 20, 'b': 15, 'c': 17}\n self.assertEqual(func(case3), expected3)\n<|end_body_2|>\n\n<|body_start_3|>\n func = lab4.filter_0_items\n case4 = {'a': 10, 'b': 0, 'c': 7}\n expected4 = {'a': 10, 'c': 7}\n self.assertEqual(func(case4), expected4)\n<|end_body_3|>\n\n<|body_start_4|>\n func = lab4.average_grades\n case5 = {'Michael': [100, 78, 88, 900 / 10], 'Daniel': [80, 95, 77, 64.0], 'Josh': [99, 89, 94, 66]}\n expected5 = {'Michael': 89.0, 'Daniel': 79.0, 'Josh': 87.0}\n self.assertEqual(func(case5), expected5)\n<|end_body_4|>\n", "revision_id": "af66da9ce5b3df549d11c6ec5268bb9fc347cbb0", "skeleton": "<|skeleton|>\nclass Lab4Test:\n\n def test_squared(self):\n \"\"\"testing squared function\"\"\"\n <|body_0|>\n\n def test_check_title(self):\n \"\"\"testing check_title function\"\"\"\n <|body_1|>\n\n def test_restock_inventory(self):\n \"\"\"testing restock_inventory function\"\"\"\n <|body_2|>\n\n def test_filter_0_items(self):\n \"\"\"testing filter_0_items function\"\"\"\n <|body_3|>\n\n def test_average_grades(self):\n \"\"\"testing average_grades function\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Lab4Test:\n def test_squared(self):\n \"\"\"testing squared function\"\"\"\n func = lab4.squared\n case1 = [1, 2, 3]\n expected1 = [1, 4, 9]\n self.assertEqual(func(case1), expected1)\n\n def test_check_title(self):\n \"\"\"testing check_title function\"\"\"\n func = lab4.check_title\n case2 = ['Hello World', 'hi', 'Hello']\n expected2 = ['Hello World', 'Hello']\n self.assertEqual(func(case2), expected2)\n\n def test_restock_inventory(self):\n \"\"\"testing restock_inventory function\"\"\"\n func = lab4.restock_inventory\n case3 = {'a': 10, 'b': 5, 'c': 7}\n expected3 = {'a': 20, 'b': 15, 'c': 17}\n self.assertEqual(func(case3), expected3)\n\n def test_filter_0_items(self):\n \"\"\"testing filter_0_items function\"\"\"\n func = lab4.filter_0_items\n case4 = {'a': 10, 'b': 0, 'c': 7}\n expected4 = {'a': 10, 'c': 7}\n self.assertEqual(func(case4), expected4)\n\n def test_average_grades(self):\n \"\"\"testing average_grades function\"\"\"\n func = lab4.average_grades\n case5 = {'Michael': [100, 78, 88, 900 / 10], 'Daniel': [80, 95, 77, 64.0], 'Josh': [99, 89, 94, 66]}\n expected5 = {'Michael': 89.0, 'Daniel': 79.0, 'Josh': 87.0}\n self.assertEqual(func(case5), expected5)\n", "source": "the_stack_v2_python_sparse", "source_path": "labs/week4/test_lab4.py", "source_repo": "ArjunPadaliya/COMP-805", "split": "test", "star_events_count": 0} {"blob_id": "611a2a180f1cb77fc415c4430ed0eb8d723aa8b2", "bodies": ["color2D = ee.Dictionary(ee.Dictionary(color).get('2D'))\nsaturation = ee.Image(color2D.get('saturation'))\nvalue = ee.Image(color2D.get('value'))\nthreshold = value.subtract(0.15).updateMask(value.lt(0.3)).unmask(0.15, False)\ngrey_and_bright = saturation.lt(ee.Image(threshold))\ncold = ee.Image(BT).lt(20)\ncloud = grey_and_bright.multiply(cold)\ncloudy = cloud.distance(ee.Kernel.euclidean(100, 'meters')).gte(0).unmask(0, False)\nreturn cloudy.rename(['cloud'])", "ndwi = ee.Image(toa).normalizedDifference(['green', 'nir'])\nwater = ndwi.gte(0.1)\nreturn water.rename(['water'])"], "bodies_text": "<|body_start_0|>\n color2D = ee.Dictionary(ee.Dictionary(color).get('2D'))\n saturation = ee.Image(color2D.get('saturation'))\n value = ee.Image(color2D.get('value'))\n threshold = value.subtract(0.15).updateMask(value.lt(0.3)).unmask(0.15, False)\n grey_and_bright = saturation.lt(ee.Image(threshold))\n cold = ee.Image(BT).lt(20)\n cloud = grey_and_bright.multiply(cold)\n cloudy = cloud.distance(ee.Kernel.euclidean(100, 'meters')).gte(0).unmask(0, False)\n return cloudy.rename(['cloud'])\n<|end_body_0|>\n\n<|body_start_1|>\n ndwi = ee.Image(toa).normalizedDifference(['green', 'nir'])\n water = ndwi.gte(0.1)\n return water.rename(['water'])\n<|end_body_1|>\n", "class_docstring": "Finds cloud, water and valid TIR pixels", "class_name": "Mask", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Mask:\n \"\"\"Finds cloud, water and valid TIR pixels\"\"\"\n\n def cloud(color, BT):\n \"\"\"Cloud pixels are grey, bright and cold - grey and bright: 2D Saturation and Value - cold: Brightness Temperature More detail on grey and bright threshold: - Saturation always < 0.1 - if Value between 0.1 and 0.2 then Saturation must be 0.1 less than Value - Value always > 0.1 (i.e. negative Saturation not possible)\"\"\"\n <|body_0|>\n\n def water(toa):\n \"\"\"Water pixels have an NDWI > 0.1 (Normalized Difference Water Index)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n color2D = ee.Dictionary(ee.Dictionary(color).get('2D'))\n saturation = ee.Image(color2D.get('saturation'))\n value = ee.Image(color2D.get('value'))\n threshold = value.subtract(0.15).updateMask(value.lt(0.3)).unmask(0.15, False)\n grey_and_bright = saturation.lt(ee.Image(threshold))\n cold = ee.Image(BT).lt(20)\n cloud = grey_and_bright.multiply(cold)\n cloudy = cloud.distance(ee.Kernel.euclidean(100, 'meters')).gte(0).unmask(0, False)\n return cloudy.rename(['cloud'])\n<|end_body_0|>\n\n<|body_start_1|>\n ndwi = ee.Image(toa).normalizedDifference(['green', 'nir'])\n water = ndwi.gte(0.1)\n return water.rename(['water'])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000440", "length_bytes": 1682, "license_type": "no_license", "methods": [{"docstring": "Cloud pixels are grey, bright and cold - grey and bright: 2D Saturation and Value - cold: Brightness Temperature More detail on grey and bright threshold: - Saturation always < 0.1 - if Value between 0.1 and 0.2 then Saturation must be 0.1 less than Value - Value always > 0.1 (i.e. negative Saturation not possible)", "name": "cloud", "signature": "def cloud(color, BT)"}, {"docstring": "Water pixels have an NDWI > 0.1 (Normalized Difference Water Index)", "name": "water", "signature": "def water(toa)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_test_001623", "prompt": "Implement the Python class `Mask` described below.\n\nClass description:\nFinds cloud, water and valid TIR pixels\n\nMethod signatures and docstrings:\n- def cloud(color, BT): Cloud pixels are grey, bright and cold - grey and bright: 2D Saturation and Value - cold: Brightness Temperature More detail on grey and bright threshold: - Saturation always < 0.1 - if Value between 0.1 and 0.2 then Saturation must be 0.1 less than Value - Value always > 0.1 (i.e. negative Saturation not possible)\n- def water(toa): Water pixels have an NDWI > 0.1 (Normalized Difference Water Index)", "prompted_full_text": "Implement the Python class `Mask` described below.\n\nClass description:\nFinds cloud, water and valid TIR pixels\n\nMethod signatures and docstrings:\n- def cloud(color, BT): Cloud pixels are grey, bright and cold - grey and bright: 2D Saturation and Value - cold: Brightness Temperature More detail on grey and bright threshold: - Saturation always < 0.1 - if Value between 0.1 and 0.2 then Saturation must be 0.1 less than Value - Value always > 0.1 (i.e. negative Saturation not possible)\n- def water(toa): Water pixels have an NDWI > 0.1 (Normalized Difference Water Index)\n\n<|skeleton|>\nclass Mask:\n \"\"\"Finds cloud, water and valid TIR pixels\"\"\"\n\n def cloud(color, BT):\n \"\"\"Cloud pixels are grey, bright and cold - grey and bright: 2D Saturation and Value - cold: Brightness Temperature More detail on grey and bright threshold: - Saturation always < 0.1 - if Value between 0.1 and 0.2 then Saturation must be 0.1 less than Value - Value always > 0.1 (i.e. negative Saturation not possible)\"\"\"\n <|body_0|>\n\n def water(toa):\n \"\"\"Water pixels have an NDWI > 0.1 (Normalized Difference Water Index)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n color2D = ee.Dictionary(ee.Dictionary(color).get('2D'))\n saturation = ee.Image(color2D.get('saturation'))\n value = ee.Image(color2D.get('value'))\n threshold = value.subtract(0.15).updateMask(value.lt(0.3)).unmask(0.15, False)\n grey_and_bright = saturation.lt(ee.Image(threshold))\n cold = ee.Image(BT).lt(20)\n cloud = grey_and_bright.multiply(cold)\n cloudy = cloud.distance(ee.Kernel.euclidean(100, 'meters')).gte(0).unmask(0, False)\n return cloudy.rename(['cloud'])\n<|end_body_0|>\n\n<|body_start_1|>\n ndwi = ee.Image(toa).normalizedDifference(['green', 'nir'])\n water = ndwi.gte(0.1)\n return water.rename(['water'])\n<|end_body_1|>\n", "revision_id": "b57ac0c18ce37b0f71f59fc8d254fa12890090ee", "skeleton": "<|skeleton|>\nclass Mask:\n \"\"\"Finds cloud, water and valid TIR pixels\"\"\"\n\n def cloud(color, BT):\n \"\"\"Cloud pixels are grey, bright and cold - grey and bright: 2D Saturation and Value - cold: Brightness Temperature More detail on grey and bright threshold: - Saturation always < 0.1 - if Value between 0.1 and 0.2 then Saturation must be 0.1 less than Value - Value always > 0.1 (i.e. negative Saturation not possible)\"\"\"\n <|body_0|>\n\n def water(toa):\n \"\"\"Water pixels have an NDWI > 0.1 (Normalized Difference Water Index)\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Mask:\n \"\"\"Finds cloud, water and valid TIR pixels\"\"\"\n\n def cloud(color, BT):\n \"\"\"Cloud pixels are grey, bright and cold - grey and bright: 2D Saturation and Value - cold: Brightness Temperature More detail on grey and bright threshold: - Saturation always < 0.1 - if Value between 0.1 and 0.2 then Saturation must be 0.1 less than Value - Value always > 0.1 (i.e. negative Saturation not possible)\"\"\"\n color2D = ee.Dictionary(ee.Dictionary(color).get('2D'))\n saturation = ee.Image(color2D.get('saturation'))\n value = ee.Image(color2D.get('value'))\n threshold = value.subtract(0.15).updateMask(value.lt(0.3)).unmask(0.15, False)\n grey_and_bright = saturation.lt(ee.Image(threshold))\n cold = ee.Image(BT).lt(20)\n cloud = grey_and_bright.multiply(cold)\n cloudy = cloud.distance(ee.Kernel.euclidean(100, 'meters')).gte(0).unmask(0, False)\n return cloudy.rename(['cloud'])\n\n def water(toa):\n \"\"\"Water pixels have an NDWI > 0.1 (Normalized Difference Water Index)\"\"\"\n ndwi = ee.Image(toa).normalizedDifference(['green', 'nir'])\n water = ndwi.gte(0.1)\n return water.rename(['water'])\n", "source": "the_stack_v2_python_sparse", "source_path": "bin/masks.py", "source_repo": "YutingYao/crater_lakes", "split": "test", "star_events_count": 0} {"blob_id": "6d5680d37b5108536d2edd678564437393d4ab6a", "bodies": ["if nums is None or len(nums) <= 0:\n return\nmy_map = {}\nfor i in range(len(nums)):\n if target - nums[i] in my_map:\n return list((my_map[target - nums[i]], i))\n else:\n my_map[nums[i]] = i\nreturn", "new_nums = nums[:]\nfor num in nums:\n new_nums.remove(num)\n if target - num in new_nums:\n if target - num == num:\n return [i for i, x in enumerate(nums) if x == num]\n else:\n return sorted([nums.index(num), nums.index(target - num)])\n new_nums = nums[:]"], "bodies_text": "<|body_start_0|>\n if nums is None or len(nums) <= 0:\n return\n my_map = {}\n for i in range(len(nums)):\n if target - nums[i] in my_map:\n return list((my_map[target - nums[i]], i))\n else:\n my_map[nums[i]] = i\n return\n<|end_body_0|>\n\n<|body_start_1|>\n new_nums = nums[:]\n for num in nums:\n new_nums.remove(num)\n if target - num in new_nums:\n if target - num == num:\n return [i for i, x in enumerate(nums) if x == num]\n else:\n return sorted([nums.index(num), nums.index(target - num)])\n new_nums = nums[:]\n<|end_body_1|>\n", "class_docstring": "https://leetcode.com/problems/two-sum/ 给一个数组nums,和一个目标数target,求两个数之和为target的下标", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n \"\"\"https://leetcode.com/problems/two-sum/ 给一个数组nums,和一个目标数target,求两个数之和为target的下标\"\"\"\n\n def two_sum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def two_sum2(self, nums, target):\n \"\"\"第二种写法\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums is None or len(nums) <= 0:\n return\n my_map = {}\n for i in range(len(nums)):\n if target - nums[i] in my_map:\n return list((my_map[target - nums[i]], i))\n else:\n my_map[nums[i]] = i\n return\n<|end_body_0|>\n\n<|body_start_1|>\n new_nums = nums[:]\n for num in nums:\n new_nums.remove(num)\n if target - num in new_nums:\n if target - num == num:\n return [i for i, x in enumerate(nums) if x == num]\n else:\n return sorted([nums.index(num), nums.index(target - num)])\n new_nums = nums[:]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000441", "length_bytes": 1249, "license_type": "no_license", "methods": [{"docstring": ":type nums: List[int] :type target: int :rtype: List[int]", "name": "two_sum", "signature": "def two_sum(self, nums, target)"}, {"docstring": "第二种写法", "name": "two_sum2", "signature": "def two_sum2(self, nums, target)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036434", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nhttps://leetcode.com/problems/two-sum/ 给一个数组nums,和一个目标数target,求两个数之和为target的下标\n\nMethod signatures and docstrings:\n- def two_sum(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def two_sum2(self, nums, target): 第二种写法", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nhttps://leetcode.com/problems/two-sum/ 给一个数组nums,和一个目标数target,求两个数之和为target的下标\n\nMethod signatures and docstrings:\n- def two_sum(self, nums, target): :type nums: List[int] :type target: int :rtype: List[int]\n- def two_sum2(self, nums, target): 第二种写法\n\n<|skeleton|>\nclass Solution:\n \"\"\"https://leetcode.com/problems/two-sum/ 给一个数组nums,和一个目标数target,求两个数之和为target的下标\"\"\"\n\n def two_sum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def two_sum2(self, nums, target):\n \"\"\"第二种写法\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if nums is None or len(nums) <= 0:\n return\n my_map = {}\n for i in range(len(nums)):\n if target - nums[i] in my_map:\n return list((my_map[target - nums[i]], i))\n else:\n my_map[nums[i]] = i\n return\n<|end_body_0|>\n\n<|body_start_1|>\n new_nums = nums[:]\n for num in nums:\n new_nums.remove(num)\n if target - num in new_nums:\n if target - num == num:\n return [i for i, x in enumerate(nums) if x == num]\n else:\n return sorted([nums.index(num), nums.index(target - num)])\n new_nums = nums[:]\n<|end_body_1|>\n", "revision_id": "077cbd25d480c21ac994f7b33e897e9f392ea032", "skeleton": "<|skeleton|>\nclass Solution:\n \"\"\"https://leetcode.com/problems/two-sum/ 给一个数组nums,和一个目标数target,求两个数之和为target的下标\"\"\"\n\n def two_sum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n <|body_0|>\n\n def two_sum2(self, nums, target):\n \"\"\"第二种写法\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n \"\"\"https://leetcode.com/problems/two-sum/ 给一个数组nums,和一个目标数target,求两个数之和为target的下标\"\"\"\n\n def two_sum(self, nums, target):\n \"\"\":type nums: List[int] :type target: int :rtype: List[int]\"\"\"\n if nums is None or len(nums) <= 0:\n return\n my_map = {}\n for i in range(len(nums)):\n if target - nums[i] in my_map:\n return list((my_map[target - nums[i]], i))\n else:\n my_map[nums[i]] = i\n return\n\n def two_sum2(self, nums, target):\n \"\"\"第二种写法\"\"\"\n new_nums = nums[:]\n for num in nums:\n new_nums.remove(num)\n if target - num in new_nums:\n if target - num == num:\n return [i for i, x in enumerate(nums) if x == num]\n else:\n return sorted([nums.index(num), nums.index(target - num)])\n new_nums = nums[:]\n", "source": "the_stack_v2_python_sparse", "source_path": "leetcode/TwoSum.py", "source_repo": "Mryangtaofang/pyleetcode", "split": "test", "star_events_count": 0} {"blob_id": "fd8f3415ed59cdec1c04a14f27a25b44753b885d", "bodies": ["self.params = {}\nself.reg = reg\nself.params['W1'] = np.random.normal(loc=0, scale=weight_scale, size=(input_dim, hidden_dim))\nself.params['b1'] = np.zeros(hidden_dim)\nself.params['W2'] = np.random.normal(loc=0, scale=weight_scale, size=(hidden_dim, num_classes))\nself.params['b2'] = np.zeros(num_classes)", "out_1, cache_1 = affine_relu_forward(X, self.params['W1'], self.params['b1'])\nout_2, cache_2 = affine_forward(out_1, self.params['W2'], self.params['b2'])\nscores = out_2\nif y is None:\n return scores\nloss, grads = (0, {})\nloss, d_scores = softmax_loss(scores, y)\ndout_1, grads['W2'], grads['b2'] = affine_backward(d_scores, cache_2)\ndx, grads['W1'], grads['b1'] = affine_relu_backward(dout_1, cache_1)\nw2_reg = 0.5 * self.reg * float(np.tensordot(self.params['W2'], self.params['W2'], axes=((0, 1), (0, 1))))\nw1_reg = 0.5 * self.reg * float(np.tensordot(self.params['W1'], self.params['W1'], axes=((0, 1), (0, 1))))\ndw2_reg = self.reg * self.params['W2']\ndw1_reg = self.reg * self.params['W1']\nloss += w2_reg + w1_reg\ngrads['W2'] += dw2_reg\ngrads['W1'] += dw1_reg\nreturn (loss, grads)"], "bodies_text": "<|body_start_0|>\n self.params = {}\n self.reg = reg\n self.params['W1'] = np.random.normal(loc=0, scale=weight_scale, size=(input_dim, hidden_dim))\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = np.random.normal(loc=0, scale=weight_scale, size=(hidden_dim, num_classes))\n self.params['b2'] = np.zeros(num_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n out_1, cache_1 = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n out_2, cache_2 = affine_forward(out_1, self.params['W2'], self.params['b2'])\n scores = out_2\n if y is None:\n return scores\n loss, grads = (0, {})\n loss, d_scores = softmax_loss(scores, y)\n dout_1, grads['W2'], grads['b2'] = affine_backward(d_scores, cache_2)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dout_1, cache_1)\n w2_reg = 0.5 * self.reg * float(np.tensordot(self.params['W2'], self.params['W2'], axes=((0, 1), (0, 1))))\n w1_reg = 0.5 * self.reg * float(np.tensordot(self.params['W1'], self.params['W1'], axes=((0, 1), (0, 1))))\n dw2_reg = self.reg * self.params['W2']\n dw1_reg = self.reg * self.params['W1']\n loss += w2_reg + w1_reg\n grads['W2'] += dw2_reg\n grads['W1'] += dw1_reg\n return (loss, grads)\n<|end_body_1|>\n", "class_docstring": "A two-layer fully-connected neural network with ReLU non-linearity and softmax loss that uses a modular layer design. We assume an input dimension of D, a hidden dimension of H, and perform classification over C classes. The architecture should be affine - relu - affine - softmax. Note that this class does not implement gradient descent; instead, it will interact with a separate Solver object that is responsible for running optimization. The learnable parameters of the model are stored in the dictionary self.params that maps parameter names to numpy arrays.", "class_name": "TwoLayerNet", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TwoLayerNet:\n \"\"\"A two-layer fully-connected neural network with ReLU non-linearity and softmax loss that uses a modular layer design. We assume an input dimension of D, a hidden dimension of H, and perform classification over C classes. The architecture should be affine - relu - affine - softmax. Note that this class does not implement gradient descent; instead, it will interact with a separate Solver object that is responsible for running optimization. The learnable parameters of the model are stored in the dictionary self.params that maps parameter names to numpy arrays.\"\"\"\n\n def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10, weight_scale=0.001, reg=0.0):\n \"\"\"Initialize a new network. :param input_dim: An integer giving the size of the input :param hidden_dim: An integer giving the size of the hidden layer :param num_classes: An integer giving the number of classes to classify :param dropout: Scalar between 0 and 1 giving dropout strength. :param weight_scale: Scalar giving the standard deviation for random initialization of the weights. :param reg: Scalar giving L2 regularization strength.\"\"\"\n <|body_0|>\n\n def loss(self, X, y=None):\n \"\"\"Compute loss and gradient for a mini-batch of data. :param X: Array of input data of shape (N, d_1, ..., d_k) :param y: Array of labels, of shape (N,). y[i] gives the label for X[i]. :return y: If y is None, then run a test-time forward pass of the model and return: - scores: Array of shape (N, C) giving classification scores, where scores[i, c] is the classification score for X[i] and class c. If y is not None, then run a training-time forward and backward pass and return a tuple of: - loss: Scalar value giving the loss - grads: Dictionary with the same keys as self.params, mapping parameter names to gradients of the loss with respect to those parameters.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.params = {}\n self.reg = reg\n self.params['W1'] = np.random.normal(loc=0, scale=weight_scale, size=(input_dim, hidden_dim))\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = np.random.normal(loc=0, scale=weight_scale, size=(hidden_dim, num_classes))\n self.params['b2'] = np.zeros(num_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n out_1, cache_1 = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n out_2, cache_2 = affine_forward(out_1, self.params['W2'], self.params['b2'])\n scores = out_2\n if y is None:\n return scores\n loss, grads = (0, {})\n loss, d_scores = softmax_loss(scores, y)\n dout_1, grads['W2'], grads['b2'] = affine_backward(d_scores, cache_2)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dout_1, cache_1)\n w2_reg = 0.5 * self.reg * float(np.tensordot(self.params['W2'], self.params['W2'], axes=((0, 1), (0, 1))))\n w1_reg = 0.5 * self.reg * float(np.tensordot(self.params['W1'], self.params['W1'], axes=((0, 1), (0, 1))))\n dw2_reg = self.reg * self.params['W2']\n dw1_reg = self.reg * self.params['W1']\n loss += w2_reg + w1_reg\n grads['W2'] += dw2_reg\n grads['W1'] += dw1_reg\n return (loss, grads)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000442", "length_bytes": 16899, "license_type": "permissive", "methods": [{"docstring": "Initialize a new network. :param input_dim: An integer giving the size of the input :param hidden_dim: An integer giving the size of the hidden layer :param num_classes: An integer giving the number of classes to classify :param dropout: Scalar between 0 and 1 giving dropout strength. :param weight_scale: Scalar giving the standard deviation for random initialization of the weights. :param reg: Scalar giving L2 regularization strength.", "name": "__init__", "signature": "def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10, weight_scale=0.001, reg=0.0)"}, {"docstring": "Compute loss and gradient for a mini-batch of data. :param X: Array of input data of shape (N, d_1, ..., d_k) :param y: Array of labels, of shape (N,). y[i] gives the label for X[i]. :return y: If y is None, then run a test-time forward pass of the model and return: - scores: Array of shape (N, C) giving classification scores, where scores[i, c] is the classification score for X[i] and class c. If y is not None, then run a training-time forward and backward pass and return a tuple of: - loss: Scalar value giving the loss - grads: Dictionary with the same keys as self.params, mapping parameter names to gradients of the loss with respect to those parameters.", "name": "loss", "signature": "def loss(self, X, y=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049823", "prompt": "Implement the Python class `TwoLayerNet` described below.\n\nClass description:\nA two-layer fully-connected neural network with ReLU non-linearity and softmax loss that uses a modular layer design. We assume an input dimension of D, a hidden dimension of H, and perform classification over C classes. The architecture should be affine - relu - affine - softmax. Note that this class does not implement gradient descent; instead, it will interact with a separate Solver object that is responsible for running optimization. The learnable parameters of the model are stored in the dictionary self.params that maps parameter names to numpy arrays.\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10, weight_scale=0.001, reg=0.0): Initialize a new network. :param input_dim: An integer giving the size of the input :param hidden_dim: An integer giving the size of the hidden layer :param num_classes: An integer giving the number of classes to classify :param dropout: Scalar between 0 and 1 giving dropout strength. :param weight_scale: Scalar giving the standard deviation for random initialization of the weights. :param reg: Scalar giving L2 regularization strength.\n- def loss(self, X, y=None): Compute loss and gradient for a mini-batch of data. :param X: Array of input data of shape (N, d_1, ..., d_k) :param y: Array of labels, of shape (N,). y[i] gives the label for X[i]. :return y: If y is None, then run a test-time forward pass of the model and return: - scores: Array of shape (N, C) giving classification scores, where scores[i, c] is the classification score for X[i] and class c. If y is not None, then run a training-time forward and backward pass and return a tuple of: - loss: Scalar value giving the loss - grads: Dictionary with the same keys as self.params, mapping parameter names to gradients of the loss with respect to those parameters.", "prompted_full_text": "Implement the Python class `TwoLayerNet` described below.\n\nClass description:\nA two-layer fully-connected neural network with ReLU non-linearity and softmax loss that uses a modular layer design. We assume an input dimension of D, a hidden dimension of H, and perform classification over C classes. The architecture should be affine - relu - affine - softmax. Note that this class does not implement gradient descent; instead, it will interact with a separate Solver object that is responsible for running optimization. The learnable parameters of the model are stored in the dictionary self.params that maps parameter names to numpy arrays.\n\nMethod signatures and docstrings:\n- def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10, weight_scale=0.001, reg=0.0): Initialize a new network. :param input_dim: An integer giving the size of the input :param hidden_dim: An integer giving the size of the hidden layer :param num_classes: An integer giving the number of classes to classify :param dropout: Scalar between 0 and 1 giving dropout strength. :param weight_scale: Scalar giving the standard deviation for random initialization of the weights. :param reg: Scalar giving L2 regularization strength.\n- def loss(self, X, y=None): Compute loss and gradient for a mini-batch of data. :param X: Array of input data of shape (N, d_1, ..., d_k) :param y: Array of labels, of shape (N,). y[i] gives the label for X[i]. :return y: If y is None, then run a test-time forward pass of the model and return: - scores: Array of shape (N, C) giving classification scores, where scores[i, c] is the classification score for X[i] and class c. If y is not None, then run a training-time forward and backward pass and return a tuple of: - loss: Scalar value giving the loss - grads: Dictionary with the same keys as self.params, mapping parameter names to gradients of the loss with respect to those parameters.\n\n<|skeleton|>\nclass TwoLayerNet:\n \"\"\"A two-layer fully-connected neural network with ReLU non-linearity and softmax loss that uses a modular layer design. We assume an input dimension of D, a hidden dimension of H, and perform classification over C classes. The architecture should be affine - relu - affine - softmax. Note that this class does not implement gradient descent; instead, it will interact with a separate Solver object that is responsible for running optimization. The learnable parameters of the model are stored in the dictionary self.params that maps parameter names to numpy arrays.\"\"\"\n\n def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10, weight_scale=0.001, reg=0.0):\n \"\"\"Initialize a new network. :param input_dim: An integer giving the size of the input :param hidden_dim: An integer giving the size of the hidden layer :param num_classes: An integer giving the number of classes to classify :param dropout: Scalar between 0 and 1 giving dropout strength. :param weight_scale: Scalar giving the standard deviation for random initialization of the weights. :param reg: Scalar giving L2 regularization strength.\"\"\"\n <|body_0|>\n\n def loss(self, X, y=None):\n \"\"\"Compute loss and gradient for a mini-batch of data. :param X: Array of input data of shape (N, d_1, ..., d_k) :param y: Array of labels, of shape (N,). y[i] gives the label for X[i]. :return y: If y is None, then run a test-time forward pass of the model and return: - scores: Array of shape (N, C) giving classification scores, where scores[i, c] is the classification score for X[i] and class c. If y is not None, then run a training-time forward and backward pass and return a tuple of: - loss: Scalar value giving the loss - grads: Dictionary with the same keys as self.params, mapping parameter names to gradients of the loss with respect to those parameters.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.params = {}\n self.reg = reg\n self.params['W1'] = np.random.normal(loc=0, scale=weight_scale, size=(input_dim, hidden_dim))\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = np.random.normal(loc=0, scale=weight_scale, size=(hidden_dim, num_classes))\n self.params['b2'] = np.zeros(num_classes)\n<|end_body_0|>\n\n<|body_start_1|>\n out_1, cache_1 = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n out_2, cache_2 = affine_forward(out_1, self.params['W2'], self.params['b2'])\n scores = out_2\n if y is None:\n return scores\n loss, grads = (0, {})\n loss, d_scores = softmax_loss(scores, y)\n dout_1, grads['W2'], grads['b2'] = affine_backward(d_scores, cache_2)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dout_1, cache_1)\n w2_reg = 0.5 * self.reg * float(np.tensordot(self.params['W2'], self.params['W2'], axes=((0, 1), (0, 1))))\n w1_reg = 0.5 * self.reg * float(np.tensordot(self.params['W1'], self.params['W1'], axes=((0, 1), (0, 1))))\n dw2_reg = self.reg * self.params['W2']\n dw1_reg = self.reg * self.params['W1']\n loss += w2_reg + w1_reg\n grads['W2'] += dw2_reg\n grads['W1'] += dw1_reg\n return (loss, grads)\n<|end_body_1|>\n", "revision_id": "02d06b3cf329709f4d68292a43659bc79d99c373", "skeleton": "<|skeleton|>\nclass TwoLayerNet:\n \"\"\"A two-layer fully-connected neural network with ReLU non-linearity and softmax loss that uses a modular layer design. We assume an input dimension of D, a hidden dimension of H, and perform classification over C classes. The architecture should be affine - relu - affine - softmax. Note that this class does not implement gradient descent; instead, it will interact with a separate Solver object that is responsible for running optimization. The learnable parameters of the model are stored in the dictionary self.params that maps parameter names to numpy arrays.\"\"\"\n\n def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10, weight_scale=0.001, reg=0.0):\n \"\"\"Initialize a new network. :param input_dim: An integer giving the size of the input :param hidden_dim: An integer giving the size of the hidden layer :param num_classes: An integer giving the number of classes to classify :param dropout: Scalar between 0 and 1 giving dropout strength. :param weight_scale: Scalar giving the standard deviation for random initialization of the weights. :param reg: Scalar giving L2 regularization strength.\"\"\"\n <|body_0|>\n\n def loss(self, X, y=None):\n \"\"\"Compute loss and gradient for a mini-batch of data. :param X: Array of input data of shape (N, d_1, ..., d_k) :param y: Array of labels, of shape (N,). y[i] gives the label for X[i]. :return y: If y is None, then run a test-time forward pass of the model and return: - scores: Array of shape (N, C) giving classification scores, where scores[i, c] is the classification score for X[i] and class c. If y is not None, then run a training-time forward and backward pass and return a tuple of: - loss: Scalar value giving the loss - grads: Dictionary with the same keys as self.params, mapping parameter names to gradients of the loss with respect to those parameters.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TwoLayerNet:\n \"\"\"A two-layer fully-connected neural network with ReLU non-linearity and softmax loss that uses a modular layer design. We assume an input dimension of D, a hidden dimension of H, and perform classification over C classes. The architecture should be affine - relu - affine - softmax. Note that this class does not implement gradient descent; instead, it will interact with a separate Solver object that is responsible for running optimization. The learnable parameters of the model are stored in the dictionary self.params that maps parameter names to numpy arrays.\"\"\"\n\n def __init__(self, input_dim=3 * 32 * 32, hidden_dim=100, num_classes=10, weight_scale=0.001, reg=0.0):\n \"\"\"Initialize a new network. :param input_dim: An integer giving the size of the input :param hidden_dim: An integer giving the size of the hidden layer :param num_classes: An integer giving the number of classes to classify :param dropout: Scalar between 0 and 1 giving dropout strength. :param weight_scale: Scalar giving the standard deviation for random initialization of the weights. :param reg: Scalar giving L2 regularization strength.\"\"\"\n self.params = {}\n self.reg = reg\n self.params['W1'] = np.random.normal(loc=0, scale=weight_scale, size=(input_dim, hidden_dim))\n self.params['b1'] = np.zeros(hidden_dim)\n self.params['W2'] = np.random.normal(loc=0, scale=weight_scale, size=(hidden_dim, num_classes))\n self.params['b2'] = np.zeros(num_classes)\n\n def loss(self, X, y=None):\n \"\"\"Compute loss and gradient for a mini-batch of data. :param X: Array of input data of shape (N, d_1, ..., d_k) :param y: Array of labels, of shape (N,). y[i] gives the label for X[i]. :return y: If y is None, then run a test-time forward pass of the model and return: - scores: Array of shape (N, C) giving classification scores, where scores[i, c] is the classification score for X[i] and class c. If y is not None, then run a training-time forward and backward pass and return a tuple of: - loss: Scalar value giving the loss - grads: Dictionary with the same keys as self.params, mapping parameter names to gradients of the loss with respect to those parameters.\"\"\"\n out_1, cache_1 = affine_relu_forward(X, self.params['W1'], self.params['b1'])\n out_2, cache_2 = affine_forward(out_1, self.params['W2'], self.params['b2'])\n scores = out_2\n if y is None:\n return scores\n loss, grads = (0, {})\n loss, d_scores = softmax_loss(scores, y)\n dout_1, grads['W2'], grads['b2'] = affine_backward(d_scores, cache_2)\n dx, grads['W1'], grads['b1'] = affine_relu_backward(dout_1, cache_1)\n w2_reg = 0.5 * self.reg * float(np.tensordot(self.params['W2'], self.params['W2'], axes=((0, 1), (0, 1))))\n w1_reg = 0.5 * self.reg * float(np.tensordot(self.params['W1'], self.params['W1'], axes=((0, 1), (0, 1))))\n dw2_reg = self.reg * self.params['W2']\n dw1_reg = self.reg * self.params['W1']\n loss += w2_reg + w1_reg\n grads['W2'] += dw2_reg\n grads['W1'] += dw1_reg\n return (loss, grads)\n", "source": "the_stack_v2_python_sparse", "source_path": "CS231n/Assignment2/classifiers/fc_net.py", "source_repo": "kozistr/ML-Study", "split": "test", "star_events_count": 5} {"blob_id": "530e2acf83a91295abb7856c2c6d9b1b494949a2", "bodies": ["subset_K = len(categories)\nassert subset_K <= K\nsuper().__init__(N, subset_K, **kwargs)\nshuffling = np.arange(K)\nnp.random.shuffle(shuffling)\nshuffling = np.array(list(categories) + [v for v in shuffling if v not in categories])\ninverted_shuffling = np.arange(K)\ninverted_shuffling[shuffling] = np.arange(K)\nself.shuffling = shuffling\nself.inverted_shuffling = inverted_shuffling\nself.subset_K = subset_K", "x = tf.gather(x, self.shuffling, axis=-1)\nx1 = x[..., :self.subset_K]\nx1 = super().call(x1)\nx2 = x[..., self.subset_K:]\nx = tf.concat([x1, x2], axis=-1)\nx = tf.gather(x, self.inverted_shuffling, axis=-1)\nreturn x", "x = tf.gather(x, self.shuffling, axis=-1)\nx1 = x[..., :self.subset_K]\nx1 = super().reverse(x1)\nx2 = x[..., self.subset_K:]\nx = tf.concat([x1, x2], axis=-1)\nx = tf.gather(x, self.inverted_shuffling, axis=-1)\nreturn x"], "bodies_text": "<|body_start_0|>\n subset_K = len(categories)\n assert subset_K <= K\n super().__init__(N, subset_K, **kwargs)\n shuffling = np.arange(K)\n np.random.shuffle(shuffling)\n shuffling = np.array(list(categories) + [v for v in shuffling if v not in categories])\n inverted_shuffling = np.arange(K)\n inverted_shuffling[shuffling] = np.arange(K)\n self.shuffling = shuffling\n self.inverted_shuffling = inverted_shuffling\n self.subset_K = subset_K\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().call(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().reverse(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n<|end_body_2|>\n", "class_docstring": "Discrete shift-only flow acting on subset of categories.", "class_name": "DiscreteFactorizedFlowPartial", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DiscreteFactorizedFlowPartial:\n \"\"\"Discrete shift-only flow acting on subset of categories.\"\"\"\n\n def __init__(self, N, K, categories, **kwargs):\n \"\"\"Args: categories A list of category numbers to be transformed.\"\"\"\n <|body_0|>\n\n def call(self, x):\n \"\"\"Applies flow to first self.subset_K shuffled categories.\"\"\"\n <|body_1|>\n\n def reverse(self, x):\n \"\"\"Applies reverse flow to first self.subset_K shuffled categories.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n subset_K = len(categories)\n assert subset_K <= K\n super().__init__(N, subset_K, **kwargs)\n shuffling = np.arange(K)\n np.random.shuffle(shuffling)\n shuffling = np.array(list(categories) + [v for v in shuffling if v not in categories])\n inverted_shuffling = np.arange(K)\n inverted_shuffling[shuffling] = np.arange(K)\n self.shuffling = shuffling\n self.inverted_shuffling = inverted_shuffling\n self.subset_K = subset_K\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().call(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().reverse(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000443", "length_bytes": 5452, "license_type": "no_license", "methods": [{"docstring": "Args: categories A list of category numbers to be transformed.", "name": "__init__", "signature": "def __init__(self, N, K, categories, **kwargs)"}, {"docstring": "Applies flow to first self.subset_K shuffled categories.", "name": "call", "signature": "def call(self, x)"}, {"docstring": "Applies reverse flow to first self.subset_K shuffled categories.", "name": "reverse", "signature": "def reverse(self, x)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_013238", "prompt": "Implement the Python class `DiscreteFactorizedFlowPartial` described below.\n\nClass description:\nDiscrete shift-only flow acting on subset of categories.\n\nMethod signatures and docstrings:\n- def __init__(self, N, K, categories, **kwargs): Args: categories A list of category numbers to be transformed.\n- def call(self, x): Applies flow to first self.subset_K shuffled categories.\n- def reverse(self, x): Applies reverse flow to first self.subset_K shuffled categories.", "prompted_full_text": "Implement the Python class `DiscreteFactorizedFlowPartial` described below.\n\nClass description:\nDiscrete shift-only flow acting on subset of categories.\n\nMethod signatures and docstrings:\n- def __init__(self, N, K, categories, **kwargs): Args: categories A list of category numbers to be transformed.\n- def call(self, x): Applies flow to first self.subset_K shuffled categories.\n- def reverse(self, x): Applies reverse flow to first self.subset_K shuffled categories.\n\n<|skeleton|>\nclass DiscreteFactorizedFlowPartial:\n \"\"\"Discrete shift-only flow acting on subset of categories.\"\"\"\n\n def __init__(self, N, K, categories, **kwargs):\n \"\"\"Args: categories A list of category numbers to be transformed.\"\"\"\n <|body_0|>\n\n def call(self, x):\n \"\"\"Applies flow to first self.subset_K shuffled categories.\"\"\"\n <|body_1|>\n\n def reverse(self, x):\n \"\"\"Applies reverse flow to first self.subset_K shuffled categories.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n subset_K = len(categories)\n assert subset_K <= K\n super().__init__(N, subset_K, **kwargs)\n shuffling = np.arange(K)\n np.random.shuffle(shuffling)\n shuffling = np.array(list(categories) + [v for v in shuffling if v not in categories])\n inverted_shuffling = np.arange(K)\n inverted_shuffling[shuffling] = np.arange(K)\n self.shuffling = shuffling\n self.inverted_shuffling = inverted_shuffling\n self.subset_K = subset_K\n<|end_body_0|>\n\n<|body_start_1|>\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().call(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().reverse(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n<|end_body_2|>\n", "revision_id": "3e1d8d2c14cbb745b9a498761ebdeaccf7f89962", "skeleton": "<|skeleton|>\nclass DiscreteFactorizedFlowPartial:\n \"\"\"Discrete shift-only flow acting on subset of categories.\"\"\"\n\n def __init__(self, N, K, categories, **kwargs):\n \"\"\"Args: categories A list of category numbers to be transformed.\"\"\"\n <|body_0|>\n\n def call(self, x):\n \"\"\"Applies flow to first self.subset_K shuffled categories.\"\"\"\n <|body_1|>\n\n def reverse(self, x):\n \"\"\"Applies reverse flow to first self.subset_K shuffled categories.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DiscreteFactorizedFlowPartial:\n \"\"\"Discrete shift-only flow acting on subset of categories.\"\"\"\n\n def __init__(self, N, K, categories, **kwargs):\n \"\"\"Args: categories A list of category numbers to be transformed.\"\"\"\n subset_K = len(categories)\n assert subset_K <= K\n super().__init__(N, subset_K, **kwargs)\n shuffling = np.arange(K)\n np.random.shuffle(shuffling)\n shuffling = np.array(list(categories) + [v for v in shuffling if v not in categories])\n inverted_shuffling = np.arange(K)\n inverted_shuffling[shuffling] = np.arange(K)\n self.shuffling = shuffling\n self.inverted_shuffling = inverted_shuffling\n self.subset_K = subset_K\n\n def call(self, x):\n \"\"\"Applies flow to first self.subset_K shuffled categories.\"\"\"\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().call(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n\n def reverse(self, x):\n \"\"\"Applies reverse flow to first self.subset_K shuffled categories.\"\"\"\n x = tf.gather(x, self.shuffling, axis=-1)\n x1 = x[..., :self.subset_K]\n x1 = super().reverse(x1)\n x2 = x[..., self.subset_K:]\n x = tf.concat([x1, x2], axis=-1)\n x = tf.gather(x, self.inverted_shuffling, axis=-1)\n return x\n", "source": "the_stack_v2_python_sparse", "source_path": "mdnf/flows_factorized.py", "source_repo": "tkusmierczyk/mixture_of_discrete_normalizing_flows", "split": "test", "star_events_count": 5} {"blob_id": "007e5f7d7e13511c443b5dae4709037f6a504a55", "bodies": ["super().__init__(num_features, eps=eps)\nif not 0 < decay < 1:\n raise ValueError('decay must be between 0 and 1')\nself.decay = decay\nself.register_buffer('inv_learning_rate', th.empty(()))\nself.register_buffer('num_batches', th.empty((), dtype=th.int))\nEMANorm.reset_running_stats(self)", "super().reset_running_stats()\nself.inv_learning_rate.zero_()\nself.num_batches.zero_()", "b_size = batch.shape[0]\nif len(batch.shape) == 1:\n batch = batch.reshape(b_size, 1)\nself.inv_learning_rate += self.decay ** self.num_batches\nlearning_rate = 1 / self.inv_learning_rate\ndelta_mean = batch.mean(0) - self.running_mean\nself.running_mean += learning_rate * delta_mean\nbatch_var = batch.var(0, unbiased=False)\ndelta_var = batch_var + (1 - learning_rate) * delta_mean ** 2 - self.running_var\nself.running_var += learning_rate * delta_var\nself.count += b_size\nself.num_batches += 1"], "bodies_text": "<|body_start_0|>\n super().__init__(num_features, eps=eps)\n if not 0 < decay < 1:\n raise ValueError('decay must be between 0 and 1')\n self.decay = decay\n self.register_buffer('inv_learning_rate', th.empty(()))\n self.register_buffer('num_batches', th.empty((), dtype=th.int))\n EMANorm.reset_running_stats(self)\n<|end_body_0|>\n\n<|body_start_1|>\n super().reset_running_stats()\n self.inv_learning_rate.zero_()\n self.num_batches.zero_()\n<|end_body_1|>\n\n<|body_start_2|>\n b_size = batch.shape[0]\n if len(batch.shape) == 1:\n batch = batch.reshape(b_size, 1)\n self.inv_learning_rate += self.decay ** self.num_batches\n learning_rate = 1 / self.inv_learning_rate\n delta_mean = batch.mean(0) - self.running_mean\n self.running_mean += learning_rate * delta_mean\n batch_var = batch.var(0, unbiased=False)\n delta_var = batch_var + (1 - learning_rate) * delta_mean ** 2 - self.running_var\n self.running_var += learning_rate * delta_var\n self.count += b_size\n self.num_batches += 1\n<|end_body_2|>\n", "class_docstring": "Similar to RunningNorm but uses an exponential weighting.", "class_name": "EMANorm", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EMANorm:\n \"\"\"Similar to RunningNorm but uses an exponential weighting.\"\"\"\n\n def __init__(self, num_features: int, decay: float=0.99, eps: float=1e-05):\n \"\"\"Builds EMARunningNorm. Args: num_features: Number of features; the length of the non-batch dim. decay: how quickly the weight on past samples decays over time. eps: small constant for numerical stability. Raises: ValueError: if decay is out of range.\"\"\"\n <|body_0|>\n\n def reset_running_stats(self):\n \"\"\"Reset the running stats of the normalization layer.\"\"\"\n <|body_1|>\n\n def update_stats(self, batch: th.Tensor) -> None:\n \"\"\"Update `self.running_mean` and `self.running_var` in batch mode. Reference Algorithm 3 from: https://github.com/HumanCompatibleAI/imitation/files/9456540/Incremental_batch_EMA_and_EMV.pdf Args: batch: A batch of data to use to update the running mean and variance.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(num_features, eps=eps)\n if not 0 < decay < 1:\n raise ValueError('decay must be between 0 and 1')\n self.decay = decay\n self.register_buffer('inv_learning_rate', th.empty(()))\n self.register_buffer('num_batches', th.empty((), dtype=th.int))\n EMANorm.reset_running_stats(self)\n<|end_body_0|>\n\n<|body_start_1|>\n super().reset_running_stats()\n self.inv_learning_rate.zero_()\n self.num_batches.zero_()\n<|end_body_1|>\n\n<|body_start_2|>\n b_size = batch.shape[0]\n if len(batch.shape) == 1:\n batch = batch.reshape(b_size, 1)\n self.inv_learning_rate += self.decay ** self.num_batches\n learning_rate = 1 / self.inv_learning_rate\n delta_mean = batch.mean(0) - self.running_mean\n self.running_mean += learning_rate * delta_mean\n batch_var = batch.var(0, unbiased=False)\n delta_var = batch_var + (1 - learning_rate) * delta_mean ** 2 - self.running_var\n self.running_var += learning_rate * delta_var\n self.count += b_size\n self.num_batches += 1\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000444", "length_bytes": 14535, "license_type": "no_license", "methods": [{"docstring": "Builds EMARunningNorm. Args: num_features: Number of features; the length of the non-batch dim. decay: how quickly the weight on past samples decays over time. eps: small constant for numerical stability. Raises: ValueError: if decay is out of range.", "name": "__init__", "signature": "def __init__(self, num_features: int, decay: float=0.99, eps: float=1e-05)"}, {"docstring": "Reset the running stats of the normalization layer.", "name": "reset_running_stats", "signature": "def reset_running_stats(self)"}, {"docstring": "Update `self.running_mean` and `self.running_var` in batch mode. Reference Algorithm 3 from: https://github.com/HumanCompatibleAI/imitation/files/9456540/Incremental_batch_EMA_and_EMV.pdf Args: batch: A batch of data to use to update the running mean and variance.", "name": "update_stats", "signature": "def update_stats(self, batch: th.Tensor) -> None"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_030512", "prompt": "Implement the Python class `EMANorm` described below.\n\nClass description:\nSimilar to RunningNorm but uses an exponential weighting.\n\nMethod signatures and docstrings:\n- def __init__(self, num_features: int, decay: float=0.99, eps: float=1e-05): Builds EMARunningNorm. Args: num_features: Number of features; the length of the non-batch dim. decay: how quickly the weight on past samples decays over time. eps: small constant for numerical stability. Raises: ValueError: if decay is out of range.\n- def reset_running_stats(self): Reset the running stats of the normalization layer.\n- def update_stats(self, batch: th.Tensor) -> None: Update `self.running_mean` and `self.running_var` in batch mode. Reference Algorithm 3 from: https://github.com/HumanCompatibleAI/imitation/files/9456540/Incremental_batch_EMA_and_EMV.pdf Args: batch: A batch of data to use to update the running mean and variance.", "prompted_full_text": "Implement the Python class `EMANorm` described below.\n\nClass description:\nSimilar to RunningNorm but uses an exponential weighting.\n\nMethod signatures and docstrings:\n- def __init__(self, num_features: int, decay: float=0.99, eps: float=1e-05): Builds EMARunningNorm. Args: num_features: Number of features; the length of the non-batch dim. decay: how quickly the weight on past samples decays over time. eps: small constant for numerical stability. Raises: ValueError: if decay is out of range.\n- def reset_running_stats(self): Reset the running stats of the normalization layer.\n- def update_stats(self, batch: th.Tensor) -> None: Update `self.running_mean` and `self.running_var` in batch mode. Reference Algorithm 3 from: https://github.com/HumanCompatibleAI/imitation/files/9456540/Incremental_batch_EMA_and_EMV.pdf Args: batch: A batch of data to use to update the running mean and variance.\n\n<|skeleton|>\nclass EMANorm:\n \"\"\"Similar to RunningNorm but uses an exponential weighting.\"\"\"\n\n def __init__(self, num_features: int, decay: float=0.99, eps: float=1e-05):\n \"\"\"Builds EMARunningNorm. Args: num_features: Number of features; the length of the non-batch dim. decay: how quickly the weight on past samples decays over time. eps: small constant for numerical stability. Raises: ValueError: if decay is out of range.\"\"\"\n <|body_0|>\n\n def reset_running_stats(self):\n \"\"\"Reset the running stats of the normalization layer.\"\"\"\n <|body_1|>\n\n def update_stats(self, batch: th.Tensor) -> None:\n \"\"\"Update `self.running_mean` and `self.running_var` in batch mode. Reference Algorithm 3 from: https://github.com/HumanCompatibleAI/imitation/files/9456540/Incremental_batch_EMA_and_EMV.pdf Args: batch: A batch of data to use to update the running mean and variance.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(num_features, eps=eps)\n if not 0 < decay < 1:\n raise ValueError('decay must be between 0 and 1')\n self.decay = decay\n self.register_buffer('inv_learning_rate', th.empty(()))\n self.register_buffer('num_batches', th.empty((), dtype=th.int))\n EMANorm.reset_running_stats(self)\n<|end_body_0|>\n\n<|body_start_1|>\n super().reset_running_stats()\n self.inv_learning_rate.zero_()\n self.num_batches.zero_()\n<|end_body_1|>\n\n<|body_start_2|>\n b_size = batch.shape[0]\n if len(batch.shape) == 1:\n batch = batch.reshape(b_size, 1)\n self.inv_learning_rate += self.decay ** self.num_batches\n learning_rate = 1 / self.inv_learning_rate\n delta_mean = batch.mean(0) - self.running_mean\n self.running_mean += learning_rate * delta_mean\n batch_var = batch.var(0, unbiased=False)\n delta_var = batch_var + (1 - learning_rate) * delta_mean ** 2 - self.running_var\n self.running_var += learning_rate * delta_var\n self.count += b_size\n self.num_batches += 1\n<|end_body_2|>\n", "revision_id": "7e55a422588c1d1e00f35a3d3a3ff896cce59e18", "skeleton": "<|skeleton|>\nclass EMANorm:\n \"\"\"Similar to RunningNorm but uses an exponential weighting.\"\"\"\n\n def __init__(self, num_features: int, decay: float=0.99, eps: float=1e-05):\n \"\"\"Builds EMARunningNorm. Args: num_features: Number of features; the length of the non-batch dim. decay: how quickly the weight on past samples decays over time. eps: small constant for numerical stability. Raises: ValueError: if decay is out of range.\"\"\"\n <|body_0|>\n\n def reset_running_stats(self):\n \"\"\"Reset the running stats of the normalization layer.\"\"\"\n <|body_1|>\n\n def update_stats(self, batch: th.Tensor) -> None:\n \"\"\"Update `self.running_mean` and `self.running_var` in batch mode. Reference Algorithm 3 from: https://github.com/HumanCompatibleAI/imitation/files/9456540/Incremental_batch_EMA_and_EMV.pdf Args: batch: A batch of data to use to update the running mean and variance.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EMANorm:\n \"\"\"Similar to RunningNorm but uses an exponential weighting.\"\"\"\n\n def __init__(self, num_features: int, decay: float=0.99, eps: float=1e-05):\n \"\"\"Builds EMARunningNorm. Args: num_features: Number of features; the length of the non-batch dim. decay: how quickly the weight on past samples decays over time. eps: small constant for numerical stability. Raises: ValueError: if decay is out of range.\"\"\"\n super().__init__(num_features, eps=eps)\n if not 0 < decay < 1:\n raise ValueError('decay must be between 0 and 1')\n self.decay = decay\n self.register_buffer('inv_learning_rate', th.empty(()))\n self.register_buffer('num_batches', th.empty((), dtype=th.int))\n EMANorm.reset_running_stats(self)\n\n def reset_running_stats(self):\n \"\"\"Reset the running stats of the normalization layer.\"\"\"\n super().reset_running_stats()\n self.inv_learning_rate.zero_()\n self.num_batches.zero_()\n\n def update_stats(self, batch: th.Tensor) -> None:\n \"\"\"Update `self.running_mean` and `self.running_var` in batch mode. Reference Algorithm 3 from: https://github.com/HumanCompatibleAI/imitation/files/9456540/Incremental_batch_EMA_and_EMV.pdf Args: batch: A batch of data to use to update the running mean and variance.\"\"\"\n b_size = batch.shape[0]\n if len(batch.shape) == 1:\n batch = batch.reshape(b_size, 1)\n self.inv_learning_rate += self.decay ** self.num_batches\n learning_rate = 1 / self.inv_learning_rate\n delta_mean = batch.mean(0) - self.running_mean\n self.running_mean += learning_rate * delta_mean\n batch_var = batch.var(0, unbiased=False)\n delta_var = batch_var + (1 - learning_rate) * delta_mean ** 2 - self.running_var\n self.running_var += learning_rate * delta_var\n self.count += b_size\n self.num_batches += 1\n", "source": "the_stack_v2_python_sparse", "source_path": "generated/test_HumanCompatibleAI_imitation.py", "source_repo": "jansel/pytorch-jit-paritybench", "split": "test", "star_events_count": 35} {"blob_id": "953ed68f6228fe11a000633b8f4c676c36ecf6ff", "bodies": ["self.arrays = np.array(array)\nself.flux1_name = flux1_name\nself.flux2_name = flux2_name\nself.times = times\nself.initial_computes()\nself.dic_label = {0: '$x$', 1: '$y$', 2: '$z$', -1: 'Total', self.dimension: 'Total'}", "self.n_runs, self.dimension, self.n_points = np.shape(self.arrays)\nself.norm = self.dimension * [0]\nself.cor_norm = self.dimension * [0]\nself.averaging()\nfor dim in range(self.dimension):\n normalisation = self.cor[dim][0]\n self.norm[dim] = normalisation\n self.cor_norm[dim] = self.cor[dim] / normalisation", "ddof = 1\narray = self.arrays\nif len(array) == 0:\n ddof = 0\nif isinstance(array[0][0][0], un.UFloat):\n array = unumpy.nominal_values(array)\nself.cor = unumpy.uarray(np.average(array, axis=0), sem(array, axis=0, ddof=ddof))", "if norm == True:\n cor = self.cor_norm[dim]\nelse:\n cor = self.cor[dim]\ny = np.array([i.n for i in cor])\ny_error = np.array([i.s for i in cor])\nax.plot(self.times[::every], y[::every])\nax.fill_between(self.times, y - y_error, y + y_error, alpha=0.4)\nif ax_label == True:\n ax.axhline(y=0, xmin=0, xmax=1, ls='--', c='black')\n ax.set_ylabel('$\\\\langle %s(t)%s(0) \\\\rangle$' % (self.flux1_name, self.flux2_name))\n ax.set_xlabel('$t$ ')\nreturn (fig, ax)", "for array in self.arrays:\n cor = array[dim]\n if isinstance(cor[0], un.UFloat):\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n times = self.times[::every]\n y = y[::every]\n y_error = y_error[::every]\n ax.plot(times, y, label=self.dic_label[dim])\n ax.fill_between(times, y - y_error, y + y_error, alpha=0.4)\n else:\n ax.plot(self.times[::every], cor[::every], label=self.dic_label[dim])\nreturn ax"], "bodies_text": "<|body_start_0|>\n self.arrays = np.array(array)\n self.flux1_name = flux1_name\n self.flux2_name = flux2_name\n self.times = times\n self.initial_computes()\n self.dic_label = {0: '$x$', 1: '$y$', 2: '$z$', -1: 'Total', self.dimension: 'Total'}\n<|end_body_0|>\n\n<|body_start_1|>\n self.n_runs, self.dimension, self.n_points = np.shape(self.arrays)\n self.norm = self.dimension * [0]\n self.cor_norm = self.dimension * [0]\n self.averaging()\n for dim in range(self.dimension):\n normalisation = self.cor[dim][0]\n self.norm[dim] = normalisation\n self.cor_norm[dim] = self.cor[dim] / normalisation\n<|end_body_1|>\n\n<|body_start_2|>\n ddof = 1\n array = self.arrays\n if len(array) == 0:\n ddof = 0\n if isinstance(array[0][0][0], un.UFloat):\n array = unumpy.nominal_values(array)\n self.cor = unumpy.uarray(np.average(array, axis=0), sem(array, axis=0, ddof=ddof))\n<|end_body_2|>\n\n<|body_start_3|>\n if norm == True:\n cor = self.cor_norm[dim]\n else:\n cor = self.cor[dim]\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n ax.plot(self.times[::every], y[::every])\n ax.fill_between(self.times, y - y_error, y + y_error, alpha=0.4)\n if ax_label == True:\n ax.axhline(y=0, xmin=0, xmax=1, ls='--', c='black')\n ax.set_ylabel('$\\\\langle %s(t)%s(0) \\\\rangle$' % (self.flux1_name, self.flux2_name))\n ax.set_xlabel('$t$ ')\n return (fig, ax)\n<|end_body_3|>\n\n<|body_start_4|>\n for array in self.arrays:\n cor = array[dim]\n if isinstance(cor[0], un.UFloat):\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n times = self.times[::every]\n y = y[::every]\n y_error = y_error[::every]\n ax.plot(times, y, label=self.dic_label[dim])\n ax.fill_between(times, y - y_error, y + y_error, alpha=0.4)\n else:\n ax.plot(self.times[::every], cor[::every], label=self.dic_label[dim])\n return ax\n<|end_body_4|>\n", "class_docstring": "The bundle is made of an array of correlations (correlation.cor), it is not a bundle of correlation objects as this might become very expensive.", "class_name": "bundle_correlation", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass bundle_correlation:\n \"\"\"The bundle is made of an array of correlations (correlation.cor), it is not a bundle of correlation objects as this might become very expensive.\"\"\"\n\n def __init__(self, array, times, flux1_name, flux2_name):\n \"\"\"Args: array: contanining all the correlations times: times at which the correlations where measured. Couls be taken from one of the correlation instances as instance.times flux1_name: Name of the flux 1, also can be taken from the instance flux2_name: Name of the flux 2, also can be taken from the instance Atributes: cor: contains the correlations as an array, the components are x,y,z, and total in 3D norm: normalisation factor for each dimension t=0 (var1(0) var2(0)) cor_norm: list to store the normalised correlations, the last is the total\"\"\"\n <|body_0|>\n\n def initial_computes(self):\n \"\"\"Gets the shape of the correlation array and calls the averaging, Args: self Returns ------- None.\"\"\"\n <|body_1|>\n\n def averaging(self):\n \"\"\"np.shape(self.arrays) =[#runs, Dimensions(+average),points Takes the average of the array of correlations. Sometimes the individual correlations have errors, therefore each point has (average + std) At the moment it strips the error from uncertainties and analyses everypoint as independent. Returns ------- None.\"\"\"\n <|body_2|>\n\n def plot(self, fig, ax, dim=0, alpha=0.4, every=1, ax_label=True, norm=True):\n \"\"\"Args: ax: axes object fig: Figure dim: is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha: is the transparency of the filling every: to not have so many points norm: True if normalised ax_label: The axis label is given here but it could be renamed later Returns: fig, ax\"\"\"\n <|body_3|>\n\n def plot_bundle(self, ax, dim, alpha=0.4, every=1):\n \"\"\"Plots all the correlations from the individual runs in the given direction TODO: Add the normalised option Args: ax axes object fig Figure dim is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha is the transparency of the filling every to not have so many points The axis label is given here but it could be renamed later\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.arrays = np.array(array)\n self.flux1_name = flux1_name\n self.flux2_name = flux2_name\n self.times = times\n self.initial_computes()\n self.dic_label = {0: '$x$', 1: '$y$', 2: '$z$', -1: 'Total', self.dimension: 'Total'}\n<|end_body_0|>\n\n<|body_start_1|>\n self.n_runs, self.dimension, self.n_points = np.shape(self.arrays)\n self.norm = self.dimension * [0]\n self.cor_norm = self.dimension * [0]\n self.averaging()\n for dim in range(self.dimension):\n normalisation = self.cor[dim][0]\n self.norm[dim] = normalisation\n self.cor_norm[dim] = self.cor[dim] / normalisation\n<|end_body_1|>\n\n<|body_start_2|>\n ddof = 1\n array = self.arrays\n if len(array) == 0:\n ddof = 0\n if isinstance(array[0][0][0], un.UFloat):\n array = unumpy.nominal_values(array)\n self.cor = unumpy.uarray(np.average(array, axis=0), sem(array, axis=0, ddof=ddof))\n<|end_body_2|>\n\n<|body_start_3|>\n if norm == True:\n cor = self.cor_norm[dim]\n else:\n cor = self.cor[dim]\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n ax.plot(self.times[::every], y[::every])\n ax.fill_between(self.times, y - y_error, y + y_error, alpha=0.4)\n if ax_label == True:\n ax.axhline(y=0, xmin=0, xmax=1, ls='--', c='black')\n ax.set_ylabel('$\\\\langle %s(t)%s(0) \\\\rangle$' % (self.flux1_name, self.flux2_name))\n ax.set_xlabel('$t$ ')\n return (fig, ax)\n<|end_body_3|>\n\n<|body_start_4|>\n for array in self.arrays:\n cor = array[dim]\n if isinstance(cor[0], un.UFloat):\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n times = self.times[::every]\n y = y[::every]\n y_error = y_error[::every]\n ax.plot(times, y, label=self.dic_label[dim])\n ax.fill_between(times, y - y_error, y + y_error, alpha=0.4)\n else:\n ax.plot(self.times[::every], cor[::every], label=self.dic_label[dim])\n return ax\n<|end_body_4|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000445", "length_bytes": 18505, "license_type": "no_license", "methods": [{"docstring": "Args: array: contanining all the correlations times: times at which the correlations where measured. Couls be taken from one of the correlation instances as instance.times flux1_name: Name of the flux 1, also can be taken from the instance flux2_name: Name of the flux 2, also can be taken from the instance Atributes: cor: contains the correlations as an array, the components are x,y,z, and total in 3D norm: normalisation factor for each dimension t=0 (var1(0) var2(0)) cor_norm: list to store the normalised correlations, the last is the total", "name": "__init__", "signature": "def __init__(self, array, times, flux1_name, flux2_name)"}, {"docstring": "Gets the shape of the correlation array and calls the averaging, Args: self Returns ------- None.", "name": "initial_computes", "signature": "def initial_computes(self)"}, {"docstring": "np.shape(self.arrays) =[#runs, Dimensions(+average),points Takes the average of the array of correlations. Sometimes the individual correlations have errors, therefore each point has (average + std) At the moment it strips the error from uncertainties and analyses everypoint as independent. Returns ------- None.", "name": "averaging", "signature": "def averaging(self)"}, {"docstring": "Args: ax: axes object fig: Figure dim: is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha: is the transparency of the filling every: to not have so many points norm: True if normalised ax_label: The axis label is given here but it could be renamed later Returns: fig, ax", "name": "plot", "signature": "def plot(self, fig, ax, dim=0, alpha=0.4, every=1, ax_label=True, norm=True)"}, {"docstring": "Plots all the correlations from the individual runs in the given direction TODO: Add the normalised option Args: ax axes object fig Figure dim is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha is the transparency of the filling every to not have so many points The axis label is given here but it could be renamed later", "name": "plot_bundle", "signature": "def plot_bundle(self, ax, dim, alpha=0.4, every=1)"}], "n_methods": 5, "original_id": "stack_v2_sparse_classes_30k_train_010458", "prompt": "Implement the Python class `bundle_correlation` described below.\n\nClass description:\nThe bundle is made of an array of correlations (correlation.cor), it is not a bundle of correlation objects as this might become very expensive.\n\nMethod signatures and docstrings:\n- def __init__(self, array, times, flux1_name, flux2_name): Args: array: contanining all the correlations times: times at which the correlations where measured. Couls be taken from one of the correlation instances as instance.times flux1_name: Name of the flux 1, also can be taken from the instance flux2_name: Name of the flux 2, also can be taken from the instance Atributes: cor: contains the correlations as an array, the components are x,y,z, and total in 3D norm: normalisation factor for each dimension t=0 (var1(0) var2(0)) cor_norm: list to store the normalised correlations, the last is the total\n- def initial_computes(self): Gets the shape of the correlation array and calls the averaging, Args: self Returns ------- None.\n- def averaging(self): np.shape(self.arrays) =[#runs, Dimensions(+average),points Takes the average of the array of correlations. Sometimes the individual correlations have errors, therefore each point has (average + std) At the moment it strips the error from uncertainties and analyses everypoint as independent. Returns ------- None.\n- def plot(self, fig, ax, dim=0, alpha=0.4, every=1, ax_label=True, norm=True): Args: ax: axes object fig: Figure dim: is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha: is the transparency of the filling every: to not have so many points norm: True if normalised ax_label: The axis label is given here but it could be renamed later Returns: fig, ax\n- def plot_bundle(self, ax, dim, alpha=0.4, every=1): Plots all the correlations from the individual runs in the given direction TODO: Add the normalised option Args: ax axes object fig Figure dim is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha is the transparency of the filling every to not have so many points The axis label is given here but it could be renamed later", "prompted_full_text": "Implement the Python class `bundle_correlation` described below.\n\nClass description:\nThe bundle is made of an array of correlations (correlation.cor), it is not a bundle of correlation objects as this might become very expensive.\n\nMethod signatures and docstrings:\n- def __init__(self, array, times, flux1_name, flux2_name): Args: array: contanining all the correlations times: times at which the correlations where measured. Couls be taken from one of the correlation instances as instance.times flux1_name: Name of the flux 1, also can be taken from the instance flux2_name: Name of the flux 2, also can be taken from the instance Atributes: cor: contains the correlations as an array, the components are x,y,z, and total in 3D norm: normalisation factor for each dimension t=0 (var1(0) var2(0)) cor_norm: list to store the normalised correlations, the last is the total\n- def initial_computes(self): Gets the shape of the correlation array and calls the averaging, Args: self Returns ------- None.\n- def averaging(self): np.shape(self.arrays) =[#runs, Dimensions(+average),points Takes the average of the array of correlations. Sometimes the individual correlations have errors, therefore each point has (average + std) At the moment it strips the error from uncertainties and analyses everypoint as independent. Returns ------- None.\n- def plot(self, fig, ax, dim=0, alpha=0.4, every=1, ax_label=True, norm=True): Args: ax: axes object fig: Figure dim: is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha: is the transparency of the filling every: to not have so many points norm: True if normalised ax_label: The axis label is given here but it could be renamed later Returns: fig, ax\n- def plot_bundle(self, ax, dim, alpha=0.4, every=1): Plots all the correlations from the individual runs in the given direction TODO: Add the normalised option Args: ax axes object fig Figure dim is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha is the transparency of the filling every to not have so many points The axis label is given here but it could be renamed later\n\n<|skeleton|>\nclass bundle_correlation:\n \"\"\"The bundle is made of an array of correlations (correlation.cor), it is not a bundle of correlation objects as this might become very expensive.\"\"\"\n\n def __init__(self, array, times, flux1_name, flux2_name):\n \"\"\"Args: array: contanining all the correlations times: times at which the correlations where measured. Couls be taken from one of the correlation instances as instance.times flux1_name: Name of the flux 1, also can be taken from the instance flux2_name: Name of the flux 2, also can be taken from the instance Atributes: cor: contains the correlations as an array, the components are x,y,z, and total in 3D norm: normalisation factor for each dimension t=0 (var1(0) var2(0)) cor_norm: list to store the normalised correlations, the last is the total\"\"\"\n <|body_0|>\n\n def initial_computes(self):\n \"\"\"Gets the shape of the correlation array and calls the averaging, Args: self Returns ------- None.\"\"\"\n <|body_1|>\n\n def averaging(self):\n \"\"\"np.shape(self.arrays) =[#runs, Dimensions(+average),points Takes the average of the array of correlations. Sometimes the individual correlations have errors, therefore each point has (average + std) At the moment it strips the error from uncertainties and analyses everypoint as independent. Returns ------- None.\"\"\"\n <|body_2|>\n\n def plot(self, fig, ax, dim=0, alpha=0.4, every=1, ax_label=True, norm=True):\n \"\"\"Args: ax: axes object fig: Figure dim: is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha: is the transparency of the filling every: to not have so many points norm: True if normalised ax_label: The axis label is given here but it could be renamed later Returns: fig, ax\"\"\"\n <|body_3|>\n\n def plot_bundle(self, ax, dim, alpha=0.4, every=1):\n \"\"\"Plots all the correlations from the individual runs in the given direction TODO: Add the normalised option Args: ax axes object fig Figure dim is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha is the transparency of the filling every to not have so many points The axis label is given here but it could be renamed later\"\"\"\n <|body_4|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.arrays = np.array(array)\n self.flux1_name = flux1_name\n self.flux2_name = flux2_name\n self.times = times\n self.initial_computes()\n self.dic_label = {0: '$x$', 1: '$y$', 2: '$z$', -1: 'Total', self.dimension: 'Total'}\n<|end_body_0|>\n\n<|body_start_1|>\n self.n_runs, self.dimension, self.n_points = np.shape(self.arrays)\n self.norm = self.dimension * [0]\n self.cor_norm = self.dimension * [0]\n self.averaging()\n for dim in range(self.dimension):\n normalisation = self.cor[dim][0]\n self.norm[dim] = normalisation\n self.cor_norm[dim] = self.cor[dim] / normalisation\n<|end_body_1|>\n\n<|body_start_2|>\n ddof = 1\n array = self.arrays\n if len(array) == 0:\n ddof = 0\n if isinstance(array[0][0][0], un.UFloat):\n array = unumpy.nominal_values(array)\n self.cor = unumpy.uarray(np.average(array, axis=0), sem(array, axis=0, ddof=ddof))\n<|end_body_2|>\n\n<|body_start_3|>\n if norm == True:\n cor = self.cor_norm[dim]\n else:\n cor = self.cor[dim]\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n ax.plot(self.times[::every], y[::every])\n ax.fill_between(self.times, y - y_error, y + y_error, alpha=0.4)\n if ax_label == True:\n ax.axhline(y=0, xmin=0, xmax=1, ls='--', c='black')\n ax.set_ylabel('$\\\\langle %s(t)%s(0) \\\\rangle$' % (self.flux1_name, self.flux2_name))\n ax.set_xlabel('$t$ ')\n return (fig, ax)\n<|end_body_3|>\n\n<|body_start_4|>\n for array in self.arrays:\n cor = array[dim]\n if isinstance(cor[0], un.UFloat):\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n times = self.times[::every]\n y = y[::every]\n y_error = y_error[::every]\n ax.plot(times, y, label=self.dic_label[dim])\n ax.fill_between(times, y - y_error, y + y_error, alpha=0.4)\n else:\n ax.plot(self.times[::every], cor[::every], label=self.dic_label[dim])\n return ax\n<|end_body_4|>\n", "revision_id": "a86e72787059e511983cd047f3027aa10eba7090", "skeleton": "<|skeleton|>\nclass bundle_correlation:\n \"\"\"The bundle is made of an array of correlations (correlation.cor), it is not a bundle of correlation objects as this might become very expensive.\"\"\"\n\n def __init__(self, array, times, flux1_name, flux2_name):\n \"\"\"Args: array: contanining all the correlations times: times at which the correlations where measured. Couls be taken from one of the correlation instances as instance.times flux1_name: Name of the flux 1, also can be taken from the instance flux2_name: Name of the flux 2, also can be taken from the instance Atributes: cor: contains the correlations as an array, the components are x,y,z, and total in 3D norm: normalisation factor for each dimension t=0 (var1(0) var2(0)) cor_norm: list to store the normalised correlations, the last is the total\"\"\"\n <|body_0|>\n\n def initial_computes(self):\n \"\"\"Gets the shape of the correlation array and calls the averaging, Args: self Returns ------- None.\"\"\"\n <|body_1|>\n\n def averaging(self):\n \"\"\"np.shape(self.arrays) =[#runs, Dimensions(+average),points Takes the average of the array of correlations. Sometimes the individual correlations have errors, therefore each point has (average + std) At the moment it strips the error from uncertainties and analyses everypoint as independent. Returns ------- None.\"\"\"\n <|body_2|>\n\n def plot(self, fig, ax, dim=0, alpha=0.4, every=1, ax_label=True, norm=True):\n \"\"\"Args: ax: axes object fig: Figure dim: is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha: is the transparency of the filling every: to not have so many points norm: True if normalised ax_label: The axis label is given here but it could be renamed later Returns: fig, ax\"\"\"\n <|body_3|>\n\n def plot_bundle(self, ax, dim, alpha=0.4, every=1):\n \"\"\"Plots all the correlations from the individual runs in the given direction TODO: Add the normalised option Args: ax axes object fig Figure dim is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha is the transparency of the filling every to not have so many points The axis label is given here but it could be renamed later\"\"\"\n <|body_4|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class bundle_correlation:\n \"\"\"The bundle is made of an array of correlations (correlation.cor), it is not a bundle of correlation objects as this might become very expensive.\"\"\"\n\n def __init__(self, array, times, flux1_name, flux2_name):\n \"\"\"Args: array: contanining all the correlations times: times at which the correlations where measured. Couls be taken from one of the correlation instances as instance.times flux1_name: Name of the flux 1, also can be taken from the instance flux2_name: Name of the flux 2, also can be taken from the instance Atributes: cor: contains the correlations as an array, the components are x,y,z, and total in 3D norm: normalisation factor for each dimension t=0 (var1(0) var2(0)) cor_norm: list to store the normalised correlations, the last is the total\"\"\"\n self.arrays = np.array(array)\n self.flux1_name = flux1_name\n self.flux2_name = flux2_name\n self.times = times\n self.initial_computes()\n self.dic_label = {0: '$x$', 1: '$y$', 2: '$z$', -1: 'Total', self.dimension: 'Total'}\n\n def initial_computes(self):\n \"\"\"Gets the shape of the correlation array and calls the averaging, Args: self Returns ------- None.\"\"\"\n self.n_runs, self.dimension, self.n_points = np.shape(self.arrays)\n self.norm = self.dimension * [0]\n self.cor_norm = self.dimension * [0]\n self.averaging()\n for dim in range(self.dimension):\n normalisation = self.cor[dim][0]\n self.norm[dim] = normalisation\n self.cor_norm[dim] = self.cor[dim] / normalisation\n\n def averaging(self):\n \"\"\"np.shape(self.arrays) =[#runs, Dimensions(+average),points Takes the average of the array of correlations. Sometimes the individual correlations have errors, therefore each point has (average + std) At the moment it strips the error from uncertainties and analyses everypoint as independent. Returns ------- None.\"\"\"\n ddof = 1\n array = self.arrays\n if len(array) == 0:\n ddof = 0\n if isinstance(array[0][0][0], un.UFloat):\n array = unumpy.nominal_values(array)\n self.cor = unumpy.uarray(np.average(array, axis=0), sem(array, axis=0, ddof=ddof))\n\n def plot(self, fig, ax, dim=0, alpha=0.4, every=1, ax_label=True, norm=True):\n \"\"\"Args: ax: axes object fig: Figure dim: is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha: is the transparency of the filling every: to not have so many points norm: True if normalised ax_label: The axis label is given here but it could be renamed later Returns: fig, ax\"\"\"\n if norm == True:\n cor = self.cor_norm[dim]\n else:\n cor = self.cor[dim]\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n ax.plot(self.times[::every], y[::every])\n ax.fill_between(self.times, y - y_error, y + y_error, alpha=0.4)\n if ax_label == True:\n ax.axhline(y=0, xmin=0, xmax=1, ls='--', c='black')\n ax.set_ylabel('$\\\\langle %s(t)%s(0) \\\\rangle$' % (self.flux1_name, self.flux2_name))\n ax.set_xlabel('$t$ ')\n return (fig, ax)\n\n def plot_bundle(self, ax, dim, alpha=0.4, every=1):\n \"\"\"Plots all the correlations from the individual runs in the given direction TODO: Add the normalised option Args: ax axes object fig Figure dim is the dimension, for example:in a 3D vector, 0-x, 1-y, 2-z and 3-total. alpha is the transparency of the filling every to not have so many points The axis label is given here but it could be renamed later\"\"\"\n for array in self.arrays:\n cor = array[dim]\n if isinstance(cor[0], un.UFloat):\n y = np.array([i.n for i in cor])\n y_error = np.array([i.s for i in cor])\n times = self.times[::every]\n y = y[::every]\n y_error = y_error[::every]\n ax.plot(times, y, label=self.dic_label[dim])\n ax.fill_between(times, y - y_error, y + y_error, alpha=0.4)\n else:\n ax.plot(self.times[::every], cor[::every], label=self.dic_label[dim])\n return ax\n", "source": "the_stack_v2_python_sparse", "source_path": "Lammps/Pore/EMD/flux_correlation.py", "source_repo": "sramirezh/Utilities", "split": "test", "star_events_count": 4} {"blob_id": "cfb56714cf7dfd03cb4036bd96a7216c6438966b", "bodies": ["if len(nums) == 0:\n return\nleft = 0\ncurrent, maxsum = (0, nums[0])\nfor right in range(len(nums)):\n current += nums[right]\n maxsum = max(maxsum, current)\n while current <= 0 and left <= right:\n current -= nums[left]\n left += 1\nreturn maxsum", "maxnum = nums[0]\nfor i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n maxnum = max(maxnum, nums[i])\nreturn maxnum"], "bodies_text": "<|body_start_0|>\n if len(nums) == 0:\n return\n left = 0\n current, maxsum = (0, nums[0])\n for right in range(len(nums)):\n current += nums[right]\n maxsum = max(maxsum, current)\n while current <= 0 and left <= right:\n current -= nums[left]\n left += 1\n return maxsum\n<|end_body_0|>\n\n<|body_start_1|>\n maxnum = nums[0]\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n maxnum = max(maxnum, nums[i])\n return maxnum\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def maxSubArray_org(self, nums: List[int]) -> int:\n \"\"\"76.21 % 思路 滑窗遍历列表; sum < 0 时候 left+1 直到 与right重合; right + 1,重新计算 current 值;\"\"\"\n <|body_0|>\n\n def maxSubArray(self, nums: List[int]) -> int:\n \"\"\"76.21% 作者:z1m 动态规划,原地修改数组\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) == 0:\n return\n left = 0\n current, maxsum = (0, nums[0])\n for right in range(len(nums)):\n current += nums[right]\n maxsum = max(maxsum, current)\n while current <= 0 and left <= right:\n current -= nums[left]\n left += 1\n return maxsum\n<|end_body_0|>\n\n<|body_start_1|>\n maxnum = nums[0]\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n maxnum = max(maxnum, nums[i])\n return maxnum\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000446", "length_bytes": 1356, "license_type": "no_license", "methods": [{"docstring": "76.21 % 思路 滑窗遍历列表; sum < 0 时候 left+1 直到 与right重合; right + 1,重新计算 current 值;", "name": "maxSubArray_org", "signature": "def maxSubArray_org(self, nums: List[int]) -> int"}, {"docstring": "76.21% 作者:z1m 动态规划,原地修改数组", "name": "maxSubArray", "signature": "def maxSubArray(self, nums: List[int]) -> int"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_037930", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSubArray_org(self, nums: List[int]) -> int: 76.21 % 思路 滑窗遍历列表; sum < 0 时候 left+1 直到 与right重合; right + 1,重新计算 current 值;\n- def maxSubArray(self, nums: List[int]) -> int: 76.21% 作者:z1m 动态规划,原地修改数组", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def maxSubArray_org(self, nums: List[int]) -> int: 76.21 % 思路 滑窗遍历列表; sum < 0 时候 left+1 直到 与right重合; right + 1,重新计算 current 值;\n- def maxSubArray(self, nums: List[int]) -> int: 76.21% 作者:z1m 动态规划,原地修改数组\n\n<|skeleton|>\nclass Solution:\n\n def maxSubArray_org(self, nums: List[int]) -> int:\n \"\"\"76.21 % 思路 滑窗遍历列表; sum < 0 时候 left+1 直到 与right重合; right + 1,重新计算 current 值;\"\"\"\n <|body_0|>\n\n def maxSubArray(self, nums: List[int]) -> int:\n \"\"\"76.21% 作者:z1m 动态规划,原地修改数组\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if len(nums) == 0:\n return\n left = 0\n current, maxsum = (0, nums[0])\n for right in range(len(nums)):\n current += nums[right]\n maxsum = max(maxsum, current)\n while current <= 0 and left <= right:\n current -= nums[left]\n left += 1\n return maxsum\n<|end_body_0|>\n\n<|body_start_1|>\n maxnum = nums[0]\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n maxnum = max(maxnum, nums[i])\n return maxnum\n<|end_body_1|>\n", "revision_id": "b6712c793bbfe443953e7186b5dbd876c01cd9a0", "skeleton": "<|skeleton|>\nclass Solution:\n\n def maxSubArray_org(self, nums: List[int]) -> int:\n \"\"\"76.21 % 思路 滑窗遍历列表; sum < 0 时候 left+1 直到 与right重合; right + 1,重新计算 current 值;\"\"\"\n <|body_0|>\n\n def maxSubArray(self, nums: List[int]) -> int:\n \"\"\"76.21% 作者:z1m 动态规划,原地修改数组\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def maxSubArray_org(self, nums: List[int]) -> int:\n \"\"\"76.21 % 思路 滑窗遍历列表; sum < 0 时候 left+1 直到 与right重合; right + 1,重新计算 current 值;\"\"\"\n if len(nums) == 0:\n return\n left = 0\n current, maxsum = (0, nums[0])\n for right in range(len(nums)):\n current += nums[right]\n maxsum = max(maxsum, current)\n while current <= 0 and left <= right:\n current -= nums[left]\n left += 1\n return maxsum\n\n def maxSubArray(self, nums: List[int]) -> int:\n \"\"\"76.21% 作者:z1m 动态规划,原地修改数组\"\"\"\n maxnum = nums[0]\n for i in range(1, len(nums)):\n if nums[i - 1] > 0:\n nums[i] += nums[i - 1]\n maxnum = max(maxnum, nums[i])\n return maxnum\n", "source": "the_stack_v2_python_sparse", "source_path": "05_leetcode/53.最大子序和.py", "source_repo": "niceNASA/Python-Foundation-Suda", "split": "test", "star_events_count": 0} {"blob_id": "77a4c3b61591ce1f38ba76eefd47aaeb593c86ff", "bodies": ["if name is None:\n name = self.__class__.__name__\nsuper(sppasBaseNumericalTier, self).__init__(name)\nself._accept_multi_tiers = False\nself._accept_no_tiers = False\nself._accept_metadata = False\nself._accept_ctrl_vocab = False\nself._accept_media = False\nself._accept_hierarchy = False\nself._accept_interval = False\nself._accept_disjoint = False\nself._accept_alt_localization = False\nself._accept_alt_tag = False\nself._accept_radius = False\nself._accept_gaps = False\nself._accept_overlaps = False", "lines = load(filename, sg.__encoding__)\nif len(lines) < 7:\n raise AioLineFormatError(len(lines), lines[-1])\nfile_type = sppasBasePraat._parse_string(lines[1])\ntier = self.create_tier(file_type)\nlast_line = len(lines) - 1\ncur_line = 6\nis_long = not lines[5].strip().isdigit()\nwhile cur_line < last_line:\n if is_long:\n cur_line += 1\n if cur_line > len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n midpoint = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n localization = sppasBasePraat.make_point(midpoint)\n cur_line += 1\n if cur_line >= len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n value = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n tag = sppasTag(value, tag_type='float')\n tier.create_annotation(sppasLocation(localization), sppasLabel(tag))\n cur_line += 1", "if self.is_empty():\n raise AioNoTiersError(file_type)\nif len(self) != 1:\n tier = self.find(file_type, case_sensitive=False)\n if tier is None:\n raise AioMultiTiersError('Praat ' + file_type)\nelse:\n tier = self[0]\nif self.is_empty() is True:\n raise AioEmptyTierError('Praat ' + file_type, tier.get_name())\nif tier.is_point() is False:\n raise AioLocationTypeError(file_type, 'intervals')\nmin_time_point = tier.get_first_point()\nmax_time_point = tier.get_last_point()\nwith codecs.open(filename, 'w', sg.__encoding__, buffering=8096) as fp:\n fp.write(sppasBasePraat._serialize_header(file_type, min_time_point.get_midpoint(), max_time_point.get_midpoint()))\n fp.write('points: size = {:d}\\n'.format(len(tier)))\n for a, annotation in enumerate(tier):\n content = 'points [{:d}]:\\n'.format(a + 1)\n content += '\\tnumber = {}\\n'.format(annotation.get_lowest_localization().get_midpoint())\n content += sppasBasePraat._serialize_labels_value(annotation.get_labels())\n fp.write(content)\n fp.close()"], "bodies_text": "<|body_start_0|>\n if name is None:\n name = self.__class__.__name__\n super(sppasBaseNumericalTier, self).__init__(name)\n self._accept_multi_tiers = False\n self._accept_no_tiers = False\n self._accept_metadata = False\n self._accept_ctrl_vocab = False\n self._accept_media = False\n self._accept_hierarchy = False\n self._accept_interval = False\n self._accept_disjoint = False\n self._accept_alt_localization = False\n self._accept_alt_tag = False\n self._accept_radius = False\n self._accept_gaps = False\n self._accept_overlaps = False\n<|end_body_0|>\n\n<|body_start_1|>\n lines = load(filename, sg.__encoding__)\n if len(lines) < 7:\n raise AioLineFormatError(len(lines), lines[-1])\n file_type = sppasBasePraat._parse_string(lines[1])\n tier = self.create_tier(file_type)\n last_line = len(lines) - 1\n cur_line = 6\n is_long = not lines[5].strip().isdigit()\n while cur_line < last_line:\n if is_long:\n cur_line += 1\n if cur_line > len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n midpoint = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n localization = sppasBasePraat.make_point(midpoint)\n cur_line += 1\n if cur_line >= len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n value = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n tag = sppasTag(value, tag_type='float')\n tier.create_annotation(sppasLocation(localization), sppasLabel(tag))\n cur_line += 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self.is_empty():\n raise AioNoTiersError(file_type)\n if len(self) != 1:\n tier = self.find(file_type, case_sensitive=False)\n if tier is None:\n raise AioMultiTiersError('Praat ' + file_type)\n else:\n tier = self[0]\n if self.is_empty() is True:\n raise AioEmptyTierError('Praat ' + file_type, tier.get_name())\n if tier.is_point() is False:\n raise AioLocationTypeError(file_type, 'intervals')\n min_time_point = tier.get_first_point()\n max_time_point = tier.get_last_point()\n with codecs.open(filename, 'w', sg.__encoding__, buffering=8096) as fp:\n fp.write(sppasBasePraat._serialize_header(file_type, min_time_point.get_midpoint(), max_time_point.get_midpoint()))\n fp.write('points: size = {:d}\\n'.format(len(tier)))\n for a, annotation in enumerate(tier):\n content = 'points [{:d}]:\\n'.format(a + 1)\n content += '\\tnumber = {}\\n'.format(annotation.get_lowest_localization().get_midpoint())\n content += sppasBasePraat._serialize_labels_value(annotation.get_labels())\n fp.write(content)\n fp.close()\n<|end_body_2|>\n", "class_docstring": "SPPAS PitchTier, IntensityTier, etc reader and writer. :author: Brigitte Bigi :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: contact@sppas.org :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi Support of Praat file formats with only one tier of numerical values like pitch, intensity, etc.", "class_name": "sppasBaseNumericalTier", "detected_licenses": ["GPL-3.0-only", "MIT", "GFDL-1.1-or-later", "GPL-3.0-or-later"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass sppasBaseNumericalTier:\n \"\"\"SPPAS PitchTier, IntensityTier, etc reader and writer. :author: Brigitte Bigi :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: contact@sppas.org :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi Support of Praat file formats with only one tier of numerical values like pitch, intensity, etc.\"\"\"\n\n def __init__(self, name=None):\n \"\"\"Initialize a new sppasBaseNumericalTier instance. :param name: (str) This transcription name.\"\"\"\n <|body_0|>\n\n def _read(self, filename):\n \"\"\"Read a file of any numerical file type. :param filename: (str) the input file name\"\"\"\n <|body_1|>\n\n def _write(self, filename, file_type):\n \"\"\"Write a file of the given file type. :param filename: (str) :param file_type: (str) Name of the file type (PitchTier, IntensityTier...)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if name is None:\n name = self.__class__.__name__\n super(sppasBaseNumericalTier, self).__init__(name)\n self._accept_multi_tiers = False\n self._accept_no_tiers = False\n self._accept_metadata = False\n self._accept_ctrl_vocab = False\n self._accept_media = False\n self._accept_hierarchy = False\n self._accept_interval = False\n self._accept_disjoint = False\n self._accept_alt_localization = False\n self._accept_alt_tag = False\n self._accept_radius = False\n self._accept_gaps = False\n self._accept_overlaps = False\n<|end_body_0|>\n\n<|body_start_1|>\n lines = load(filename, sg.__encoding__)\n if len(lines) < 7:\n raise AioLineFormatError(len(lines), lines[-1])\n file_type = sppasBasePraat._parse_string(lines[1])\n tier = self.create_tier(file_type)\n last_line = len(lines) - 1\n cur_line = 6\n is_long = not lines[5].strip().isdigit()\n while cur_line < last_line:\n if is_long:\n cur_line += 1\n if cur_line > len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n midpoint = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n localization = sppasBasePraat.make_point(midpoint)\n cur_line += 1\n if cur_line >= len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n value = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n tag = sppasTag(value, tag_type='float')\n tier.create_annotation(sppasLocation(localization), sppasLabel(tag))\n cur_line += 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self.is_empty():\n raise AioNoTiersError(file_type)\n if len(self) != 1:\n tier = self.find(file_type, case_sensitive=False)\n if tier is None:\n raise AioMultiTiersError('Praat ' + file_type)\n else:\n tier = self[0]\n if self.is_empty() is True:\n raise AioEmptyTierError('Praat ' + file_type, tier.get_name())\n if tier.is_point() is False:\n raise AioLocationTypeError(file_type, 'intervals')\n min_time_point = tier.get_first_point()\n max_time_point = tier.get_last_point()\n with codecs.open(filename, 'w', sg.__encoding__, buffering=8096) as fp:\n fp.write(sppasBasePraat._serialize_header(file_type, min_time_point.get_midpoint(), max_time_point.get_midpoint()))\n fp.write('points: size = {:d}\\n'.format(len(tier)))\n for a, annotation in enumerate(tier):\n content = 'points [{:d}]:\\n'.format(a + 1)\n content += '\\tnumber = {}\\n'.format(annotation.get_lowest_localization().get_midpoint())\n content += sppasBasePraat._serialize_labels_value(annotation.get_labels())\n fp.write(content)\n fp.close()\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000447", "length_bytes": 33289, "license_type": "permissive", "methods": [{"docstring": "Initialize a new sppasBaseNumericalTier instance. :param name: (str) This transcription name.", "name": "__init__", "signature": "def __init__(self, name=None)"}, {"docstring": "Read a file of any numerical file type. :param filename: (str) the input file name", "name": "_read", "signature": "def _read(self, filename)"}, {"docstring": "Write a file of the given file type. :param filename: (str) :param file_type: (str) Name of the file type (PitchTier, IntensityTier...)", "name": "_write", "signature": "def _write(self, filename, file_type)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_029871", "prompt": "Implement the Python class `sppasBaseNumericalTier` described below.\n\nClass description:\nSPPAS PitchTier, IntensityTier, etc reader and writer. :author: Brigitte Bigi :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: contact@sppas.org :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi Support of Praat file formats with only one tier of numerical values like pitch, intensity, etc.\n\nMethod signatures and docstrings:\n- def __init__(self, name=None): Initialize a new sppasBaseNumericalTier instance. :param name: (str) This transcription name.\n- def _read(self, filename): Read a file of any numerical file type. :param filename: (str) the input file name\n- def _write(self, filename, file_type): Write a file of the given file type. :param filename: (str) :param file_type: (str) Name of the file type (PitchTier, IntensityTier...)", "prompted_full_text": "Implement the Python class `sppasBaseNumericalTier` described below.\n\nClass description:\nSPPAS PitchTier, IntensityTier, etc reader and writer. :author: Brigitte Bigi :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: contact@sppas.org :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi Support of Praat file formats with only one tier of numerical values like pitch, intensity, etc.\n\nMethod signatures and docstrings:\n- def __init__(self, name=None): Initialize a new sppasBaseNumericalTier instance. :param name: (str) This transcription name.\n- def _read(self, filename): Read a file of any numerical file type. :param filename: (str) the input file name\n- def _write(self, filename, file_type): Write a file of the given file type. :param filename: (str) :param file_type: (str) Name of the file type (PitchTier, IntensityTier...)\n\n<|skeleton|>\nclass sppasBaseNumericalTier:\n \"\"\"SPPAS PitchTier, IntensityTier, etc reader and writer. :author: Brigitte Bigi :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: contact@sppas.org :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi Support of Praat file formats with only one tier of numerical values like pitch, intensity, etc.\"\"\"\n\n def __init__(self, name=None):\n \"\"\"Initialize a new sppasBaseNumericalTier instance. :param name: (str) This transcription name.\"\"\"\n <|body_0|>\n\n def _read(self, filename):\n \"\"\"Read a file of any numerical file type. :param filename: (str) the input file name\"\"\"\n <|body_1|>\n\n def _write(self, filename, file_type):\n \"\"\"Write a file of the given file type. :param filename: (str) :param file_type: (str) Name of the file type (PitchTier, IntensityTier...)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if name is None:\n name = self.__class__.__name__\n super(sppasBaseNumericalTier, self).__init__(name)\n self._accept_multi_tiers = False\n self._accept_no_tiers = False\n self._accept_metadata = False\n self._accept_ctrl_vocab = False\n self._accept_media = False\n self._accept_hierarchy = False\n self._accept_interval = False\n self._accept_disjoint = False\n self._accept_alt_localization = False\n self._accept_alt_tag = False\n self._accept_radius = False\n self._accept_gaps = False\n self._accept_overlaps = False\n<|end_body_0|>\n\n<|body_start_1|>\n lines = load(filename, sg.__encoding__)\n if len(lines) < 7:\n raise AioLineFormatError(len(lines), lines[-1])\n file_type = sppasBasePraat._parse_string(lines[1])\n tier = self.create_tier(file_type)\n last_line = len(lines) - 1\n cur_line = 6\n is_long = not lines[5].strip().isdigit()\n while cur_line < last_line:\n if is_long:\n cur_line += 1\n if cur_line > len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n midpoint = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n localization = sppasBasePraat.make_point(midpoint)\n cur_line += 1\n if cur_line >= len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n value = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n tag = sppasTag(value, tag_type='float')\n tier.create_annotation(sppasLocation(localization), sppasLabel(tag))\n cur_line += 1\n<|end_body_1|>\n\n<|body_start_2|>\n if self.is_empty():\n raise AioNoTiersError(file_type)\n if len(self) != 1:\n tier = self.find(file_type, case_sensitive=False)\n if tier is None:\n raise AioMultiTiersError('Praat ' + file_type)\n else:\n tier = self[0]\n if self.is_empty() is True:\n raise AioEmptyTierError('Praat ' + file_type, tier.get_name())\n if tier.is_point() is False:\n raise AioLocationTypeError(file_type, 'intervals')\n min_time_point = tier.get_first_point()\n max_time_point = tier.get_last_point()\n with codecs.open(filename, 'w', sg.__encoding__, buffering=8096) as fp:\n fp.write(sppasBasePraat._serialize_header(file_type, min_time_point.get_midpoint(), max_time_point.get_midpoint()))\n fp.write('points: size = {:d}\\n'.format(len(tier)))\n for a, annotation in enumerate(tier):\n content = 'points [{:d}]:\\n'.format(a + 1)\n content += '\\tnumber = {}\\n'.format(annotation.get_lowest_localization().get_midpoint())\n content += sppasBasePraat._serialize_labels_value(annotation.get_labels())\n fp.write(content)\n fp.close()\n<|end_body_2|>\n", "revision_id": "3167b65f576abcc27a8767d24c274a04712bd948", "skeleton": "<|skeleton|>\nclass sppasBaseNumericalTier:\n \"\"\"SPPAS PitchTier, IntensityTier, etc reader and writer. :author: Brigitte Bigi :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: contact@sppas.org :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi Support of Praat file formats with only one tier of numerical values like pitch, intensity, etc.\"\"\"\n\n def __init__(self, name=None):\n \"\"\"Initialize a new sppasBaseNumericalTier instance. :param name: (str) This transcription name.\"\"\"\n <|body_0|>\n\n def _read(self, filename):\n \"\"\"Read a file of any numerical file type. :param filename: (str) the input file name\"\"\"\n <|body_1|>\n\n def _write(self, filename, file_type):\n \"\"\"Write a file of the given file type. :param filename: (str) :param file_type: (str) Name of the file type (PitchTier, IntensityTier...)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class sppasBaseNumericalTier:\n \"\"\"SPPAS PitchTier, IntensityTier, etc reader and writer. :author: Brigitte Bigi :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :contact: contact@sppas.org :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi Support of Praat file formats with only one tier of numerical values like pitch, intensity, etc.\"\"\"\n\n def __init__(self, name=None):\n \"\"\"Initialize a new sppasBaseNumericalTier instance. :param name: (str) This transcription name.\"\"\"\n if name is None:\n name = self.__class__.__name__\n super(sppasBaseNumericalTier, self).__init__(name)\n self._accept_multi_tiers = False\n self._accept_no_tiers = False\n self._accept_metadata = False\n self._accept_ctrl_vocab = False\n self._accept_media = False\n self._accept_hierarchy = False\n self._accept_interval = False\n self._accept_disjoint = False\n self._accept_alt_localization = False\n self._accept_alt_tag = False\n self._accept_radius = False\n self._accept_gaps = False\n self._accept_overlaps = False\n\n def _read(self, filename):\n \"\"\"Read a file of any numerical file type. :param filename: (str) the input file name\"\"\"\n lines = load(filename, sg.__encoding__)\n if len(lines) < 7:\n raise AioLineFormatError(len(lines), lines[-1])\n file_type = sppasBasePraat._parse_string(lines[1])\n tier = self.create_tier(file_type)\n last_line = len(lines) - 1\n cur_line = 6\n is_long = not lines[5].strip().isdigit()\n while cur_line < last_line:\n if is_long:\n cur_line += 1\n if cur_line > len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n midpoint = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n localization = sppasBasePraat.make_point(midpoint)\n cur_line += 1\n if cur_line >= len(lines):\n raise AioLineFormatError(cur_line, lines[-1])\n value = sppasBasePraat._parse_float(lines[cur_line], cur_line + 1)\n tag = sppasTag(value, tag_type='float')\n tier.create_annotation(sppasLocation(localization), sppasLabel(tag))\n cur_line += 1\n\n def _write(self, filename, file_type):\n \"\"\"Write a file of the given file type. :param filename: (str) :param file_type: (str) Name of the file type (PitchTier, IntensityTier...)\"\"\"\n if self.is_empty():\n raise AioNoTiersError(file_type)\n if len(self) != 1:\n tier = self.find(file_type, case_sensitive=False)\n if tier is None:\n raise AioMultiTiersError('Praat ' + file_type)\n else:\n tier = self[0]\n if self.is_empty() is True:\n raise AioEmptyTierError('Praat ' + file_type, tier.get_name())\n if tier.is_point() is False:\n raise AioLocationTypeError(file_type, 'intervals')\n min_time_point = tier.get_first_point()\n max_time_point = tier.get_last_point()\n with codecs.open(filename, 'w', sg.__encoding__, buffering=8096) as fp:\n fp.write(sppasBasePraat._serialize_header(file_type, min_time_point.get_midpoint(), max_time_point.get_midpoint()))\n fp.write('points: size = {:d}\\n'.format(len(tier)))\n for a, annotation in enumerate(tier):\n content = 'points [{:d}]:\\n'.format(a + 1)\n content += '\\tnumber = {}\\n'.format(annotation.get_lowest_localization().get_midpoint())\n content += sppasBasePraat._serialize_labels_value(annotation.get_labels())\n fp.write(content)\n fp.close()\n", "source": "the_stack_v2_python_sparse", "source_path": "sppas/sppas/src/anndata/aio/praat.py", "source_repo": "mirfan899/MTTS", "split": "test", "star_events_count": 0} {"blob_id": "505f4e945c775523144a12fea164064ee51e351b", "bodies": ["try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\nexcept UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\nself.inject_response(in_bytes, delay)", "try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\nexcept UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\nself.inject(in_bytes, delay)"], "bodies_text": "<|body_start_0|>\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject_response(in_bytes, delay)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject(in_bytes, delay)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "RemoteConnection", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass RemoteConnection:\n\n def remote_inject_response(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n <|body_0|>\n\n def remote_inject(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject_response(in_bytes, delay)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject(in_bytes, delay)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000448", "length_bytes": 14425, "license_type": "permissive", "methods": [{"docstring": "Simulate remote endpoint that sends response. Response is given as strings.", "name": "remote_inject_response", "signature": "def remote_inject_response(self, input_strings, delay=0.0)"}, {"docstring": "Simulate remote endpoint that sends response. Response is given as strings.", "name": "remote_inject", "signature": "def remote_inject(self, input_strings, delay=0.0)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_006006", "prompt": "Implement the Python class `RemoteConnection` described below.\n\nClass description:\nImplement the RemoteConnection class.\n\nMethod signatures and docstrings:\n- def remote_inject_response(self, input_strings, delay=0.0): Simulate remote endpoint that sends response. Response is given as strings.\n- def remote_inject(self, input_strings, delay=0.0): Simulate remote endpoint that sends response. Response is given as strings.", "prompted_full_text": "Implement the Python class `RemoteConnection` described below.\n\nClass description:\nImplement the RemoteConnection class.\n\nMethod signatures and docstrings:\n- def remote_inject_response(self, input_strings, delay=0.0): Simulate remote endpoint that sends response. Response is given as strings.\n- def remote_inject(self, input_strings, delay=0.0): Simulate remote endpoint that sends response. Response is given as strings.\n\n<|skeleton|>\nclass RemoteConnection:\n\n def remote_inject_response(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n <|body_0|>\n\n def remote_inject(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject_response(in_bytes, delay)\n<|end_body_0|>\n\n<|body_start_1|>\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject(in_bytes, delay)\n<|end_body_1|>\n", "revision_id": "5a7bb06807b6e0124c77040367d0c20f42849a4c", "skeleton": "<|skeleton|>\nclass RemoteConnection:\n\n def remote_inject_response(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n <|body_0|>\n\n def remote_inject(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class RemoteConnection:\n def remote_inject_response(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject_response(in_bytes, delay)\n\n def remote_inject(self, input_strings, delay=0.0):\n \"\"\"Simulate remote endpoint that sends response. Response is given as strings.\"\"\"\n try:\n in_bytes = [data.encode('utf-8') for data in input_strings]\n except UnicodeDecodeError:\n in_bytes = [data.decode('utf-8').encode('utf-8') for data in input_strings]\n self.inject(in_bytes, delay)\n", "source": "the_stack_v2_python_sparse", "source_path": "moler/util/cmds_events_doc.py", "source_repo": "nokia/moler", "split": "test", "star_events_count": 60} {"blob_id": "216972c0a03f2db20e480610cfb5128b413e530d", "bodies": ["if batch_max_steps % hop_size != 0:\n batch_max_steps += -(batch_max_steps % hop_size)\nassert batch_max_steps % hop_size == 0\nself.batch_max_steps = batch_max_steps\nself.batch_max_frames = batch_max_steps // hop_size\nself.hop_size = hop_size\nself.aux_context_window = aux_context_window\nself.use_noise_input = use_noise_input\nself.start_offset = aux_context_window\nself.end_offset = -(self.batch_max_frames + aux_context_window)\nself.mel_threshold = self.batch_max_frames + 2 * aux_context_window", "batch = [self._adjust_length(*b) for b in batch if len(b[1]) > self.mel_threshold]\nxs, cs = ([b[0] for b in batch], [b[1] for b in batch])\nc_lengths = [len(c) for c in cs]\nstart_frames = np.array([np.random.randint(self.start_offset, cl + self.end_offset) for cl in c_lengths])\nx_starts = start_frames * self.hop_size\nx_ends = x_starts + self.batch_max_steps\nc_starts = start_frames - self.aux_context_window\nc_ends = start_frames + self.batch_max_frames + self.aux_context_window\ny_batch = [x[start:end] for x, start, end in zip(xs, x_starts, x_ends)]\nc_batch = [c[start:end] for c, start, end in zip(cs, c_starts, c_ends)]\ny_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1)\nc_batch = torch.tensor(c_batch, dtype=torch.float).transpose(2, 1)\nif self.use_noise_input:\n z_batch = torch.randn(y_batch.size())\n return ((z_batch, c_batch), y_batch)\nelse:\n return ((c_batch,), y_batch)", "if len(x) < len(c) * self.hop_size:\n x = np.pad(x, (0, len(c) * self.hop_size - len(x)), mode='edge')\nassert len(x) == len(c) * self.hop_size\nreturn (x, c)"], "bodies_text": "<|body_start_0|>\n if batch_max_steps % hop_size != 0:\n batch_max_steps += -(batch_max_steps % hop_size)\n assert batch_max_steps % hop_size == 0\n self.batch_max_steps = batch_max_steps\n self.batch_max_frames = batch_max_steps // hop_size\n self.hop_size = hop_size\n self.aux_context_window = aux_context_window\n self.use_noise_input = use_noise_input\n self.start_offset = aux_context_window\n self.end_offset = -(self.batch_max_frames + aux_context_window)\n self.mel_threshold = self.batch_max_frames + 2 * aux_context_window\n<|end_body_0|>\n\n<|body_start_1|>\n batch = [self._adjust_length(*b) for b in batch if len(b[1]) > self.mel_threshold]\n xs, cs = ([b[0] for b in batch], [b[1] for b in batch])\n c_lengths = [len(c) for c in cs]\n start_frames = np.array([np.random.randint(self.start_offset, cl + self.end_offset) for cl in c_lengths])\n x_starts = start_frames * self.hop_size\n x_ends = x_starts + self.batch_max_steps\n c_starts = start_frames - self.aux_context_window\n c_ends = start_frames + self.batch_max_frames + self.aux_context_window\n y_batch = [x[start:end] for x, start, end in zip(xs, x_starts, x_ends)]\n c_batch = [c[start:end] for c, start, end in zip(cs, c_starts, c_ends)]\n y_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1)\n c_batch = torch.tensor(c_batch, dtype=torch.float).transpose(2, 1)\n if self.use_noise_input:\n z_batch = torch.randn(y_batch.size())\n return ((z_batch, c_batch), y_batch)\n else:\n return ((c_batch,), y_batch)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(x) < len(c) * self.hop_size:\n x = np.pad(x, (0, len(c) * self.hop_size - len(x)), mode='edge')\n assert len(x) == len(c) * self.hop_size\n return (x, c)\n<|end_body_2|>\n", "class_docstring": "Customized collater for Pytorch DataLoader in training.", "class_name": "Collater", "detected_licenses": ["Apache-2.0", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Collater:\n \"\"\"Customized collater for Pytorch DataLoader in training.\"\"\"\n\n def __init__(self, batch_max_steps=20480, hop_size=256, aux_context_window=2, use_noise_input=False):\n \"\"\"Initialize customized collater for PyTorch DataLoader. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. aux_context_window (int): Context window size for auxiliary feature conv. use_noise_input (bool): Whether to use noise input.\"\"\"\n <|body_0|>\n\n def __call__(self, batch):\n \"\"\"Convert into batch tensors. Args: batch (list): list of tuple of the pair of audio and features. Returns: Tensor: Gaussian noise batch (B, 1, T). Tensor: Auxiliary feature batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size. Tensor: Target signal batch (B, 1, T).\"\"\"\n <|body_1|>\n\n def _adjust_length(self, x, c):\n \"\"\"Adjust the audio and feature lengths. Note: Basically we assume that the length of x and c are adjusted through preprocessing stage, but if we use other library processed features, this process will be needed.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if batch_max_steps % hop_size != 0:\n batch_max_steps += -(batch_max_steps % hop_size)\n assert batch_max_steps % hop_size == 0\n self.batch_max_steps = batch_max_steps\n self.batch_max_frames = batch_max_steps // hop_size\n self.hop_size = hop_size\n self.aux_context_window = aux_context_window\n self.use_noise_input = use_noise_input\n self.start_offset = aux_context_window\n self.end_offset = -(self.batch_max_frames + aux_context_window)\n self.mel_threshold = self.batch_max_frames + 2 * aux_context_window\n<|end_body_0|>\n\n<|body_start_1|>\n batch = [self._adjust_length(*b) for b in batch if len(b[1]) > self.mel_threshold]\n xs, cs = ([b[0] for b in batch], [b[1] for b in batch])\n c_lengths = [len(c) for c in cs]\n start_frames = np.array([np.random.randint(self.start_offset, cl + self.end_offset) for cl in c_lengths])\n x_starts = start_frames * self.hop_size\n x_ends = x_starts + self.batch_max_steps\n c_starts = start_frames - self.aux_context_window\n c_ends = start_frames + self.batch_max_frames + self.aux_context_window\n y_batch = [x[start:end] for x, start, end in zip(xs, x_starts, x_ends)]\n c_batch = [c[start:end] for c, start, end in zip(cs, c_starts, c_ends)]\n y_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1)\n c_batch = torch.tensor(c_batch, dtype=torch.float).transpose(2, 1)\n if self.use_noise_input:\n z_batch = torch.randn(y_batch.size())\n return ((z_batch, c_batch), y_batch)\n else:\n return ((c_batch,), y_batch)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(x) < len(c) * self.hop_size:\n x = np.pad(x, (0, len(c) * self.hop_size - len(x)), mode='edge')\n assert len(x) == len(c) * self.hop_size\n return (x, c)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000449", "length_bytes": 38795, "license_type": "permissive", "methods": [{"docstring": "Initialize customized collater for PyTorch DataLoader. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. aux_context_window (int): Context window size for auxiliary feature conv. use_noise_input (bool): Whether to use noise input.", "name": "__init__", "signature": "def __init__(self, batch_max_steps=20480, hop_size=256, aux_context_window=2, use_noise_input=False)"}, {"docstring": "Convert into batch tensors. Args: batch (list): list of tuple of the pair of audio and features. Returns: Tensor: Gaussian noise batch (B, 1, T). Tensor: Auxiliary feature batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size. Tensor: Target signal batch (B, 1, T).", "name": "__call__", "signature": "def __call__(self, batch)"}, {"docstring": "Adjust the audio and feature lengths. Note: Basically we assume that the length of x and c are adjusted through preprocessing stage, but if we use other library processed features, this process will be needed.", "name": "_adjust_length", "signature": "def _adjust_length(self, x, c)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_009116", "prompt": "Implement the Python class `Collater` described below.\n\nClass description:\nCustomized collater for Pytorch DataLoader in training.\n\nMethod signatures and docstrings:\n- def __init__(self, batch_max_steps=20480, hop_size=256, aux_context_window=2, use_noise_input=False): Initialize customized collater for PyTorch DataLoader. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. aux_context_window (int): Context window size for auxiliary feature conv. use_noise_input (bool): Whether to use noise input.\n- def __call__(self, batch): Convert into batch tensors. Args: batch (list): list of tuple of the pair of audio and features. Returns: Tensor: Gaussian noise batch (B, 1, T). Tensor: Auxiliary feature batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size. Tensor: Target signal batch (B, 1, T).\n- def _adjust_length(self, x, c): Adjust the audio and feature lengths. Note: Basically we assume that the length of x and c are adjusted through preprocessing stage, but if we use other library processed features, this process will be needed.", "prompted_full_text": "Implement the Python class `Collater` described below.\n\nClass description:\nCustomized collater for Pytorch DataLoader in training.\n\nMethod signatures and docstrings:\n- def __init__(self, batch_max_steps=20480, hop_size=256, aux_context_window=2, use_noise_input=False): Initialize customized collater for PyTorch DataLoader. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. aux_context_window (int): Context window size for auxiliary feature conv. use_noise_input (bool): Whether to use noise input.\n- def __call__(self, batch): Convert into batch tensors. Args: batch (list): list of tuple of the pair of audio and features. Returns: Tensor: Gaussian noise batch (B, 1, T). Tensor: Auxiliary feature batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size. Tensor: Target signal batch (B, 1, T).\n- def _adjust_length(self, x, c): Adjust the audio and feature lengths. Note: Basically we assume that the length of x and c are adjusted through preprocessing stage, but if we use other library processed features, this process will be needed.\n\n<|skeleton|>\nclass Collater:\n \"\"\"Customized collater for Pytorch DataLoader in training.\"\"\"\n\n def __init__(self, batch_max_steps=20480, hop_size=256, aux_context_window=2, use_noise_input=False):\n \"\"\"Initialize customized collater for PyTorch DataLoader. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. aux_context_window (int): Context window size for auxiliary feature conv. use_noise_input (bool): Whether to use noise input.\"\"\"\n <|body_0|>\n\n def __call__(self, batch):\n \"\"\"Convert into batch tensors. Args: batch (list): list of tuple of the pair of audio and features. Returns: Tensor: Gaussian noise batch (B, 1, T). Tensor: Auxiliary feature batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size. Tensor: Target signal batch (B, 1, T).\"\"\"\n <|body_1|>\n\n def _adjust_length(self, x, c):\n \"\"\"Adjust the audio and feature lengths. Note: Basically we assume that the length of x and c are adjusted through preprocessing stage, but if we use other library processed features, this process will be needed.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if batch_max_steps % hop_size != 0:\n batch_max_steps += -(batch_max_steps % hop_size)\n assert batch_max_steps % hop_size == 0\n self.batch_max_steps = batch_max_steps\n self.batch_max_frames = batch_max_steps // hop_size\n self.hop_size = hop_size\n self.aux_context_window = aux_context_window\n self.use_noise_input = use_noise_input\n self.start_offset = aux_context_window\n self.end_offset = -(self.batch_max_frames + aux_context_window)\n self.mel_threshold = self.batch_max_frames + 2 * aux_context_window\n<|end_body_0|>\n\n<|body_start_1|>\n batch = [self._adjust_length(*b) for b in batch if len(b[1]) > self.mel_threshold]\n xs, cs = ([b[0] for b in batch], [b[1] for b in batch])\n c_lengths = [len(c) for c in cs]\n start_frames = np.array([np.random.randint(self.start_offset, cl + self.end_offset) for cl in c_lengths])\n x_starts = start_frames * self.hop_size\n x_ends = x_starts + self.batch_max_steps\n c_starts = start_frames - self.aux_context_window\n c_ends = start_frames + self.batch_max_frames + self.aux_context_window\n y_batch = [x[start:end] for x, start, end in zip(xs, x_starts, x_ends)]\n c_batch = [c[start:end] for c, start, end in zip(cs, c_starts, c_ends)]\n y_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1)\n c_batch = torch.tensor(c_batch, dtype=torch.float).transpose(2, 1)\n if self.use_noise_input:\n z_batch = torch.randn(y_batch.size())\n return ((z_batch, c_batch), y_batch)\n else:\n return ((c_batch,), y_batch)\n<|end_body_1|>\n\n<|body_start_2|>\n if len(x) < len(c) * self.hop_size:\n x = np.pad(x, (0, len(c) * self.hop_size - len(x)), mode='edge')\n assert len(x) == len(c) * self.hop_size\n return (x, c)\n<|end_body_2|>\n", "revision_id": "9d643e88946fc4a24f2d4d073c08b05ea693f4c5", "skeleton": "<|skeleton|>\nclass Collater:\n \"\"\"Customized collater for Pytorch DataLoader in training.\"\"\"\n\n def __init__(self, batch_max_steps=20480, hop_size=256, aux_context_window=2, use_noise_input=False):\n \"\"\"Initialize customized collater for PyTorch DataLoader. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. aux_context_window (int): Context window size for auxiliary feature conv. use_noise_input (bool): Whether to use noise input.\"\"\"\n <|body_0|>\n\n def __call__(self, batch):\n \"\"\"Convert into batch tensors. Args: batch (list): list of tuple of the pair of audio and features. Returns: Tensor: Gaussian noise batch (B, 1, T). Tensor: Auxiliary feature batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size. Tensor: Target signal batch (B, 1, T).\"\"\"\n <|body_1|>\n\n def _adjust_length(self, x, c):\n \"\"\"Adjust the audio and feature lengths. Note: Basically we assume that the length of x and c are adjusted through preprocessing stage, but if we use other library processed features, this process will be needed.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Collater:\n \"\"\"Customized collater for Pytorch DataLoader in training.\"\"\"\n\n def __init__(self, batch_max_steps=20480, hop_size=256, aux_context_window=2, use_noise_input=False):\n \"\"\"Initialize customized collater for PyTorch DataLoader. Args: batch_max_steps (int): The maximum length of input signal in batch. hop_size (int): Hop size of auxiliary features. aux_context_window (int): Context window size for auxiliary feature conv. use_noise_input (bool): Whether to use noise input.\"\"\"\n if batch_max_steps % hop_size != 0:\n batch_max_steps += -(batch_max_steps % hop_size)\n assert batch_max_steps % hop_size == 0\n self.batch_max_steps = batch_max_steps\n self.batch_max_frames = batch_max_steps // hop_size\n self.hop_size = hop_size\n self.aux_context_window = aux_context_window\n self.use_noise_input = use_noise_input\n self.start_offset = aux_context_window\n self.end_offset = -(self.batch_max_frames + aux_context_window)\n self.mel_threshold = self.batch_max_frames + 2 * aux_context_window\n\n def __call__(self, batch):\n \"\"\"Convert into batch tensors. Args: batch (list): list of tuple of the pair of audio and features. Returns: Tensor: Gaussian noise batch (B, 1, T). Tensor: Auxiliary feature batch (B, C, T'), where T = (T' - 2 * aux_context_window) * hop_size. Tensor: Target signal batch (B, 1, T).\"\"\"\n batch = [self._adjust_length(*b) for b in batch if len(b[1]) > self.mel_threshold]\n xs, cs = ([b[0] for b in batch], [b[1] for b in batch])\n c_lengths = [len(c) for c in cs]\n start_frames = np.array([np.random.randint(self.start_offset, cl + self.end_offset) for cl in c_lengths])\n x_starts = start_frames * self.hop_size\n x_ends = x_starts + self.batch_max_steps\n c_starts = start_frames - self.aux_context_window\n c_ends = start_frames + self.batch_max_frames + self.aux_context_window\n y_batch = [x[start:end] for x, start, end in zip(xs, x_starts, x_ends)]\n c_batch = [c[start:end] for c, start, end in zip(cs, c_starts, c_ends)]\n y_batch = torch.tensor(y_batch, dtype=torch.float).unsqueeze(1)\n c_batch = torch.tensor(c_batch, dtype=torch.float).transpose(2, 1)\n if self.use_noise_input:\n z_batch = torch.randn(y_batch.size())\n return ((z_batch, c_batch), y_batch)\n else:\n return ((c_batch,), y_batch)\n\n def _adjust_length(self, x, c):\n \"\"\"Adjust the audio and feature lengths. Note: Basically we assume that the length of x and c are adjusted through preprocessing stage, but if we use other library processed features, this process will be needed.\"\"\"\n if len(x) < len(c) * self.hop_size:\n x = np.pad(x, (0, len(c) * self.hop_size - len(x)), mode='edge')\n assert len(x) == len(c) * self.hop_size\n return (x, c)\n", "source": "the_stack_v2_python_sparse", "source_path": "speech/speech_synthesis/vqmivc/pytorch/ParallelWaveGAN/parallel_wavegan/bin/train.py", "source_repo": "Deep-Spark/DeepSparkHub", "split": "test", "star_events_count": 7} {"blob_id": "d6042ec81b5d80b63131d038f9032f8b9d76b978", "bodies": ["WAITLIST = ['__lll_lock_wait', 'futex_abstimed_wait', 'futex_abstimed_wait_cancelable', 'futex_reltimed_wait', 'futex_reltimed_wait_cancelable', 'futex_wait', 'futex_wait_cancelable']\nif is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_mutex'):\n return MutexType.PTHREAD_MUTEX_T\nif is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_rwlock'):\n return MutexType.PTHREAD_RWLOCK_T\nreturn None", "if mutex_type == MutexType.PTHREAD_MUTEX_T:\n return get_pthread_mutex_t_owner_and_address\nif mutex_type == MutexType.PTHREAD_RWLOCK_T:\n return get_pthread_rwlock_t_owner_and_address\nreturn None"], "bodies_text": "<|body_start_0|>\n WAITLIST = ['__lll_lock_wait', 'futex_abstimed_wait', 'futex_abstimed_wait_cancelable', 'futex_reltimed_wait', 'futex_reltimed_wait_cancelable', 'futex_wait', 'futex_wait_cancelable']\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_mutex'):\n return MutexType.PTHREAD_MUTEX_T\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_rwlock'):\n return MutexType.PTHREAD_RWLOCK_T\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if mutex_type == MutexType.PTHREAD_MUTEX_T:\n return get_pthread_mutex_t_owner_and_address\n if mutex_type == MutexType.PTHREAD_RWLOCK_T:\n return get_pthread_rwlock_t_owner_and_address\n return None\n<|end_body_1|>\n", "class_docstring": "Types of mutexes that we can detect deadlocks.", "class_name": "MutexType", "detected_licenses": ["MIT", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MutexType:\n \"\"\"Types of mutexes that we can detect deadlocks.\"\"\"\n\n def get_mutex_type(thread_id, top_line):\n \"\"\"Returns the probable mutex type, based on the first line of the thread's stack. Returns None if not found.\"\"\"\n <|body_0|>\n\n def get_mutex_owner_and_address_func_for_type(mutex_type):\n \"\"\"Returns a function to resolve the mutex owner and address for the given type. The returned function f has the following signature: f: args: (map of thread lwp -> thread id), blocked thread lwp returns: (lwp of thread owning mutex, mutex address) or (None, None) if not found. Returns None if there is no function for this mutex_type.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n WAITLIST = ['__lll_lock_wait', 'futex_abstimed_wait', 'futex_abstimed_wait_cancelable', 'futex_reltimed_wait', 'futex_reltimed_wait_cancelable', 'futex_wait', 'futex_wait_cancelable']\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_mutex'):\n return MutexType.PTHREAD_MUTEX_T\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_rwlock'):\n return MutexType.PTHREAD_RWLOCK_T\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if mutex_type == MutexType.PTHREAD_MUTEX_T:\n return get_pthread_mutex_t_owner_and_address\n if mutex_type == MutexType.PTHREAD_RWLOCK_T:\n return get_pthread_rwlock_t_owner_and_address\n return None\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000450", "length_bytes": 16226, "license_type": "permissive", "methods": [{"docstring": "Returns the probable mutex type, based on the first line of the thread's stack. Returns None if not found.", "name": "get_mutex_type", "signature": "def get_mutex_type(thread_id, top_line)"}, {"docstring": "Returns a function to resolve the mutex owner and address for the given type. The returned function f has the following signature: f: args: (map of thread lwp -> thread id), blocked thread lwp returns: (lwp of thread owning mutex, mutex address) or (None, None) if not found. Returns None if there is no function for this mutex_type.", "name": "get_mutex_owner_and_address_func_for_type", "signature": "def get_mutex_owner_and_address_func_for_type(mutex_type)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_025203", "prompt": "Implement the Python class `MutexType` described below.\n\nClass description:\nTypes of mutexes that we can detect deadlocks.\n\nMethod signatures and docstrings:\n- def get_mutex_type(thread_id, top_line): Returns the probable mutex type, based on the first line of the thread's stack. Returns None if not found.\n- def get_mutex_owner_and_address_func_for_type(mutex_type): Returns a function to resolve the mutex owner and address for the given type. The returned function f has the following signature: f: args: (map of thread lwp -> thread id), blocked thread lwp returns: (lwp of thread owning mutex, mutex address) or (None, None) if not found. Returns None if there is no function for this mutex_type.", "prompted_full_text": "Implement the Python class `MutexType` described below.\n\nClass description:\nTypes of mutexes that we can detect deadlocks.\n\nMethod signatures and docstrings:\n- def get_mutex_type(thread_id, top_line): Returns the probable mutex type, based on the first line of the thread's stack. Returns None if not found.\n- def get_mutex_owner_and_address_func_for_type(mutex_type): Returns a function to resolve the mutex owner and address for the given type. The returned function f has the following signature: f: args: (map of thread lwp -> thread id), blocked thread lwp returns: (lwp of thread owning mutex, mutex address) or (None, None) if not found. Returns None if there is no function for this mutex_type.\n\n<|skeleton|>\nclass MutexType:\n \"\"\"Types of mutexes that we can detect deadlocks.\"\"\"\n\n def get_mutex_type(thread_id, top_line):\n \"\"\"Returns the probable mutex type, based on the first line of the thread's stack. Returns None if not found.\"\"\"\n <|body_0|>\n\n def get_mutex_owner_and_address_func_for_type(mutex_type):\n \"\"\"Returns a function to resolve the mutex owner and address for the given type. The returned function f has the following signature: f: args: (map of thread lwp -> thread id), blocked thread lwp returns: (lwp of thread owning mutex, mutex address) or (None, None) if not found. Returns None if there is no function for this mutex_type.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n WAITLIST = ['__lll_lock_wait', 'futex_abstimed_wait', 'futex_abstimed_wait_cancelable', 'futex_reltimed_wait', 'futex_reltimed_wait_cancelable', 'futex_wait', 'futex_wait_cancelable']\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_mutex'):\n return MutexType.PTHREAD_MUTEX_T\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_rwlock'):\n return MutexType.PTHREAD_RWLOCK_T\n return None\n<|end_body_0|>\n\n<|body_start_1|>\n if mutex_type == MutexType.PTHREAD_MUTEX_T:\n return get_pthread_mutex_t_owner_and_address\n if mutex_type == MutexType.PTHREAD_RWLOCK_T:\n return get_pthread_rwlock_t_owner_and_address\n return None\n<|end_body_1|>\n", "revision_id": "ab45d9b6a7a2a24b2a725447387f36772dd2cc4a", "skeleton": "<|skeleton|>\nclass MutexType:\n \"\"\"Types of mutexes that we can detect deadlocks.\"\"\"\n\n def get_mutex_type(thread_id, top_line):\n \"\"\"Returns the probable mutex type, based on the first line of the thread's stack. Returns None if not found.\"\"\"\n <|body_0|>\n\n def get_mutex_owner_and_address_func_for_type(mutex_type):\n \"\"\"Returns a function to resolve the mutex owner and address for the given type. The returned function f has the following signature: f: args: (map of thread lwp -> thread id), blocked thread lwp returns: (lwp of thread owning mutex, mutex address) or (None, None) if not found. Returns None if there is no function for this mutex_type.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MutexType:\n \"\"\"Types of mutexes that we can detect deadlocks.\"\"\"\n\n def get_mutex_type(thread_id, top_line):\n \"\"\"Returns the probable mutex type, based on the first line of the thread's stack. Returns None if not found.\"\"\"\n WAITLIST = ['__lll_lock_wait', 'futex_abstimed_wait', 'futex_abstimed_wait_cancelable', 'futex_reltimed_wait', 'futex_reltimed_wait_cancelable', 'futex_wait', 'futex_wait_cancelable']\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_mutex'):\n return MutexType.PTHREAD_MUTEX_T\n if is_thread_blocked_with_frame(thread_id, top_line, WAITLIST, 'pthread_rwlock'):\n return MutexType.PTHREAD_RWLOCK_T\n return None\n\n def get_mutex_owner_and_address_func_for_type(mutex_type):\n \"\"\"Returns a function to resolve the mutex owner and address for the given type. The returned function f has the following signature: f: args: (map of thread lwp -> thread id), blocked thread lwp returns: (lwp of thread owning mutex, mutex address) or (None, None) if not found. Returns None if there is no function for this mutex_type.\"\"\"\n if mutex_type == MutexType.PTHREAD_MUTEX_T:\n return get_pthread_mutex_t_owner_and_address\n if mutex_type == MutexType.PTHREAD_RWLOCK_T:\n return get_pthread_rwlock_t_owner_and_address\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "folly/experimental/gdb/deadlock.py", "source_repo": "facebook/folly", "split": "test", "star_events_count": 23991} {"blob_id": "9112f4d3de30b8a46f207bd30b1d73ffb1fcbb12", "bodies": ["self.cards = []\nself.character = charac\nself.name = charac.name\nself.real = real", "dice1 = randint(1, 6)\ndice2 = randint(1, 6)\nreturn (dice1, dice2)", "for i, p in enumerate(players):\n if p == self:\n for j in range(1, len(players)):\n if i + j > len(players) - 1:\n j = -i\n for card in players[i + j].cards:\n print('check card ' + card.name)\n if card in cards:\n return card\n print('Check next player')\n return None"], "bodies_text": "<|body_start_0|>\n self.cards = []\n self.character = charac\n self.name = charac.name\n self.real = real\n<|end_body_0|>\n\n<|body_start_1|>\n dice1 = randint(1, 6)\n dice2 = randint(1, 6)\n return (dice1, dice2)\n<|end_body_1|>\n\n<|body_start_2|>\n for i, p in enumerate(players):\n if p == self:\n for j in range(1, len(players)):\n if i + j > len(players) - 1:\n j = -i\n for card in players[i + j].cards:\n print('check card ' + card.name)\n if card in cards:\n return card\n print('Check next player')\n return None\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Player", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Player:\n\n def __init__(self, charac, real):\n \"\"\"Class initialiser :param charac: Selected character :param boolean real: True if the character is human\"\"\"\n <|body_0|>\n\n def rollDice():\n \"\"\"Rolls the dice :return: a tuple containing the value of each die.\"\"\"\n <|body_1|>\n\n def checkNextPlayer(self, cards, players):\n \"\"\"Finds the first card in the suggested card list that another player is holding. If no player has the card, None is returned :param cards: List of cards suggested :param players: List of players in the game :return: Card or None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cards = []\n self.character = charac\n self.name = charac.name\n self.real = real\n<|end_body_0|>\n\n<|body_start_1|>\n dice1 = randint(1, 6)\n dice2 = randint(1, 6)\n return (dice1, dice2)\n<|end_body_1|>\n\n<|body_start_2|>\n for i, p in enumerate(players):\n if p == self:\n for j in range(1, len(players)):\n if i + j > len(players) - 1:\n j = -i\n for card in players[i + j].cards:\n print('check card ' + card.name)\n if card in cards:\n return card\n print('Check next player')\n return None\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000451", "length_bytes": 6083, "license_type": "no_license", "methods": [{"docstring": "Class initialiser :param charac: Selected character :param boolean real: True if the character is human", "name": "__init__", "signature": "def __init__(self, charac, real)"}, {"docstring": "Rolls the dice :return: a tuple containing the value of each die.", "name": "rollDice", "signature": "def rollDice()"}, {"docstring": "Finds the first card in the suggested card list that another player is holding. If no player has the card, None is returned :param cards: List of cards suggested :param players: List of players in the game :return: Card or None", "name": "checkNextPlayer", "signature": "def checkNextPlayer(self, cards, players)"}], "n_methods": 3, "prompt": "Implement the Python class `Player` described below.\n\nClass description:\nImplement the Player class.\n\nMethod signatures and docstrings:\n- def __init__(self, charac, real): Class initialiser :param charac: Selected character :param boolean real: True if the character is human\n- def rollDice(): Rolls the dice :return: a tuple containing the value of each die.\n- def checkNextPlayer(self, cards, players): Finds the first card in the suggested card list that another player is holding. If no player has the card, None is returned :param cards: List of cards suggested :param players: List of players in the game :return: Card or None", "prompted_full_text": "Implement the Python class `Player` described below.\n\nClass description:\nImplement the Player class.\n\nMethod signatures and docstrings:\n- def __init__(self, charac, real): Class initialiser :param charac: Selected character :param boolean real: True if the character is human\n- def rollDice(): Rolls the dice :return: a tuple containing the value of each die.\n- def checkNextPlayer(self, cards, players): Finds the first card in the suggested card list that another player is holding. If no player has the card, None is returned :param cards: List of cards suggested :param players: List of players in the game :return: Card or None\n\n<|skeleton|>\nclass Player:\n\n def __init__(self, charac, real):\n \"\"\"Class initialiser :param charac: Selected character :param boolean real: True if the character is human\"\"\"\n <|body_0|>\n\n def rollDice():\n \"\"\"Rolls the dice :return: a tuple containing the value of each die.\"\"\"\n <|body_1|>\n\n def checkNextPlayer(self, cards, players):\n \"\"\"Finds the first card in the suggested card list that another player is holding. If no player has the card, None is returned :param cards: List of cards suggested :param players: List of players in the game :return: Card or None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.cards = []\n self.character = charac\n self.name = charac.name\n self.real = real\n<|end_body_0|>\n\n<|body_start_1|>\n dice1 = randint(1, 6)\n dice2 = randint(1, 6)\n return (dice1, dice2)\n<|end_body_1|>\n\n<|body_start_2|>\n for i, p in enumerate(players):\n if p == self:\n for j in range(1, len(players)):\n if i + j > len(players) - 1:\n j = -i\n for card in players[i + j].cards:\n print('check card ' + card.name)\n if card in cards:\n return card\n print('Check next player')\n return None\n<|end_body_2|>\n", "revision_id": "788c2224d3279d83840a8e1ec10efa8bd2f3e389", "skeleton": "<|skeleton|>\nclass Player:\n\n def __init__(self, charac, real):\n \"\"\"Class initialiser :param charac: Selected character :param boolean real: True if the character is human\"\"\"\n <|body_0|>\n\n def rollDice():\n \"\"\"Rolls the dice :return: a tuple containing the value of each die.\"\"\"\n <|body_1|>\n\n def checkNextPlayer(self, cards, players):\n \"\"\"Finds the first card in the suggested card list that another player is holding. If no player has the card, None is returned :param cards: List of cards suggested :param players: List of players in the game :return: Card or None\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Player:\n def __init__(self, charac, real):\n \"\"\"Class initialiser :param charac: Selected character :param boolean real: True if the character is human\"\"\"\n self.cards = []\n self.character = charac\n self.name = charac.name\n self.real = real\n\n def rollDice():\n \"\"\"Rolls the dice :return: a tuple containing the value of each die.\"\"\"\n dice1 = randint(1, 6)\n dice2 = randint(1, 6)\n return (dice1, dice2)\n\n def checkNextPlayer(self, cards, players):\n \"\"\"Finds the first card in the suggested card list that another player is holding. If no player has the card, None is returned :param cards: List of cards suggested :param players: List of players in the game :return: Card or None\"\"\"\n for i, p in enumerate(players):\n if p == self:\n for j in range(1, len(players)):\n if i + j > len(players) - 1:\n j = -i\n for card in players[i + j].cards:\n print('check card ' + card.name)\n if card in cards:\n return card\n print('Check next player')\n return None\n", "source": "the_stack_v2_python_sparse", "source_path": "player.py", "source_repo": "happenz7/Game-of-Clue", "split": "test", "star_events_count": 0} {"blob_id": "c8cfb2de958f6877e8dd000425c5da770cbbec62", "bodies": ["try:\n booster, format = serve_utils.get_loaded_booster(model_dir, serve_utils.is_ensemble_enabled())\nexcept Exception as e:\n raise ModelLoadInferenceError('Unable to load model: {}'.format(str(e)))\nreturn (booster, format)", "if len(input_data) == 0:\n raise NoContentInferenceError()\ndtest, content_type = serve_utils.parse_content_data(input_data, input_content_type)\nreturn (dtest, content_type)", "booster, model_format = model\ndtest, content_type = data\ntry:\n return serve_utils.predict(booster, model_format, dtest, content_type)\nexcept Exception as e:\n raise BadRequestInferenceError(str(e))", "accept_type = accept.lower()\ntry:\n if accept_type == content_types.CSV or accept_type == 'csv':\n if SAGEMAKER_BATCH:\n return_data = '\\n'.join(map(str, prediction.tolist())) + '\\n'\n else:\n return_data = ','.join(map(str, prediction.tolist()))\n encoded_prediction = return_data.encode('utf-8')\n elif accept_type == content_types.JSON or accept_type == 'json':\n encoded_prediction = encoder.encode(prediction, accept_type)\n else:\n raise ValueError(\"{} is not an accepted Accept type. Please choose one of the following: ['{}', '{}'].\".format(accept, content_types.CSV, content_types.JSON))\nexcept Exception as e:\n raise UnsupportedMediaTypeInferenceError('Encoding to accept type {} failed with exception: {}'.format(accept, e))\nreturn encoded_prediction"], "bodies_text": "<|body_start_0|>\n try:\n booster, format = serve_utils.get_loaded_booster(model_dir, serve_utils.is_ensemble_enabled())\n except Exception as e:\n raise ModelLoadInferenceError('Unable to load model: {}'.format(str(e)))\n return (booster, format)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(input_data) == 0:\n raise NoContentInferenceError()\n dtest, content_type = serve_utils.parse_content_data(input_data, input_content_type)\n return (dtest, content_type)\n<|end_body_1|>\n\n<|body_start_2|>\n booster, model_format = model\n dtest, content_type = data\n try:\n return serve_utils.predict(booster, model_format, dtest, content_type)\n except Exception as e:\n raise BadRequestInferenceError(str(e))\n<|end_body_2|>\n\n<|body_start_3|>\n accept_type = accept.lower()\n try:\n if accept_type == content_types.CSV or accept_type == 'csv':\n if SAGEMAKER_BATCH:\n return_data = '\\n'.join(map(str, prediction.tolist())) + '\\n'\n else:\n return_data = ','.join(map(str, prediction.tolist()))\n encoded_prediction = return_data.encode('utf-8')\n elif accept_type == content_types.JSON or accept_type == 'json':\n encoded_prediction = encoder.encode(prediction, accept_type)\n else:\n raise ValueError(\"{} is not an accepted Accept type. Please choose one of the following: ['{}', '{}'].\".format(accept, content_types.CSV, content_types.JSON))\n except Exception as e:\n raise UnsupportedMediaTypeInferenceError('Encoding to accept type {} failed with exception: {}'.format(accept, e))\n return encoded_prediction\n<|end_body_3|>\n", "class_docstring": "", "class_name": "DefaultXGBoostAlgoModeInferenceHandler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DefaultXGBoostAlgoModeInferenceHandler:\n\n def default_model_fn(self, model_dir):\n \"\"\"Load a model. For XGBoost Framework, a default function to load a model is not provided. Users should provide customized model_fn() in script. Args: model_dir: a directory where model is saved. Returns: A XGBoost model. XGBoost model format type.\"\"\"\n <|body_0|>\n\n def default_input_fn(self, input_data, input_content_type):\n \"\"\"Take request data and de-serializes the data into an object for prediction. When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server, the model server receives two pieces of information: - The request Content-Type, for example \"application/json\" - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size. The input_fn is responsible to take the request data and pre-process it before prediction. Args: input_data (obj): the request data. input_content_type (str): the request Content-Type. XGBoost accepts CSV, LIBSVM, and RECORDIO-PROTOBUF. Returns: (obj): data ready for prediction. For XGBoost, this defaults to DMatrix.\"\"\"\n <|body_1|>\n\n def default_predict_fn(self, data, model):\n \"\"\"A default predict_fn for XGBooost Framework. Calls a model on data deserialized in input_fn. Args: data: input data (DMatrix) for prediction deserialized by input_fn and data content type model: XGBoost model loaded in memory by model_fn, and xgboost model format Returns: a prediction\"\"\"\n <|body_2|>\n\n def default_output_fn(self, prediction, accept):\n \"\"\"Return encoded prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: encoded response for MMS to return to client\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n booster, format = serve_utils.get_loaded_booster(model_dir, serve_utils.is_ensemble_enabled())\n except Exception as e:\n raise ModelLoadInferenceError('Unable to load model: {}'.format(str(e)))\n return (booster, format)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(input_data) == 0:\n raise NoContentInferenceError()\n dtest, content_type = serve_utils.parse_content_data(input_data, input_content_type)\n return (dtest, content_type)\n<|end_body_1|>\n\n<|body_start_2|>\n booster, model_format = model\n dtest, content_type = data\n try:\n return serve_utils.predict(booster, model_format, dtest, content_type)\n except Exception as e:\n raise BadRequestInferenceError(str(e))\n<|end_body_2|>\n\n<|body_start_3|>\n accept_type = accept.lower()\n try:\n if accept_type == content_types.CSV or accept_type == 'csv':\n if SAGEMAKER_BATCH:\n return_data = '\\n'.join(map(str, prediction.tolist())) + '\\n'\n else:\n return_data = ','.join(map(str, prediction.tolist()))\n encoded_prediction = return_data.encode('utf-8')\n elif accept_type == content_types.JSON or accept_type == 'json':\n encoded_prediction = encoder.encode(prediction, accept_type)\n else:\n raise ValueError(\"{} is not an accepted Accept type. Please choose one of the following: ['{}', '{}'].\".format(accept, content_types.CSV, content_types.JSON))\n except Exception as e:\n raise UnsupportedMediaTypeInferenceError('Encoding to accept type {} failed with exception: {}'.format(accept, e))\n return encoded_prediction\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000452", "length_bytes": 6195, "license_type": "permissive", "methods": [{"docstring": "Load a model. For XGBoost Framework, a default function to load a model is not provided. Users should provide customized model_fn() in script. Args: model_dir: a directory where model is saved. Returns: A XGBoost model. XGBoost model format type.", "name": "default_model_fn", "signature": "def default_model_fn(self, model_dir)"}, {"docstring": "Take request data and de-serializes the data into an object for prediction. When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server, the model server receives two pieces of information: - The request Content-Type, for example \"application/json\" - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size. The input_fn is responsible to take the request data and pre-process it before prediction. Args: input_data (obj): the request data. input_content_type (str): the request Content-Type. XGBoost accepts CSV, LIBSVM, and RECORDIO-PROTOBUF. Returns: (obj): data ready for prediction. For XGBoost, this defaults to DMatrix.", "name": "default_input_fn", "signature": "def default_input_fn(self, input_data, input_content_type)"}, {"docstring": "A default predict_fn for XGBooost Framework. Calls a model on data deserialized in input_fn. Args: data: input data (DMatrix) for prediction deserialized by input_fn and data content type model: XGBoost model loaded in memory by model_fn, and xgboost model format Returns: a prediction", "name": "default_predict_fn", "signature": "def default_predict_fn(self, data, model)"}, {"docstring": "Return encoded prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: encoded response for MMS to return to client", "name": "default_output_fn", "signature": "def default_output_fn(self, prediction, accept)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_047400", "prompt": "Implement the Python class `DefaultXGBoostAlgoModeInferenceHandler` described below.\n\nClass description:\nImplement the DefaultXGBoostAlgoModeInferenceHandler class.\n\nMethod signatures and docstrings:\n- def default_model_fn(self, model_dir): Load a model. For XGBoost Framework, a default function to load a model is not provided. Users should provide customized model_fn() in script. Args: model_dir: a directory where model is saved. Returns: A XGBoost model. XGBoost model format type.\n- def default_input_fn(self, input_data, input_content_type): Take request data and de-serializes the data into an object for prediction. When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server, the model server receives two pieces of information: - The request Content-Type, for example \"application/json\" - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size. The input_fn is responsible to take the request data and pre-process it before prediction. Args: input_data (obj): the request data. input_content_type (str): the request Content-Type. XGBoost accepts CSV, LIBSVM, and RECORDIO-PROTOBUF. Returns: (obj): data ready for prediction. For XGBoost, this defaults to DMatrix.\n- def default_predict_fn(self, data, model): A default predict_fn for XGBooost Framework. Calls a model on data deserialized in input_fn. Args: data: input data (DMatrix) for prediction deserialized by input_fn and data content type model: XGBoost model loaded in memory by model_fn, and xgboost model format Returns: a prediction\n- def default_output_fn(self, prediction, accept): Return encoded prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: encoded response for MMS to return to client", "prompted_full_text": "Implement the Python class `DefaultXGBoostAlgoModeInferenceHandler` described below.\n\nClass description:\nImplement the DefaultXGBoostAlgoModeInferenceHandler class.\n\nMethod signatures and docstrings:\n- def default_model_fn(self, model_dir): Load a model. For XGBoost Framework, a default function to load a model is not provided. Users should provide customized model_fn() in script. Args: model_dir: a directory where model is saved. Returns: A XGBoost model. XGBoost model format type.\n- def default_input_fn(self, input_data, input_content_type): Take request data and de-serializes the data into an object for prediction. When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server, the model server receives two pieces of information: - The request Content-Type, for example \"application/json\" - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size. The input_fn is responsible to take the request data and pre-process it before prediction. Args: input_data (obj): the request data. input_content_type (str): the request Content-Type. XGBoost accepts CSV, LIBSVM, and RECORDIO-PROTOBUF. Returns: (obj): data ready for prediction. For XGBoost, this defaults to DMatrix.\n- def default_predict_fn(self, data, model): A default predict_fn for XGBooost Framework. Calls a model on data deserialized in input_fn. Args: data: input data (DMatrix) for prediction deserialized by input_fn and data content type model: XGBoost model loaded in memory by model_fn, and xgboost model format Returns: a prediction\n- def default_output_fn(self, prediction, accept): Return encoded prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: encoded response for MMS to return to client\n\n<|skeleton|>\nclass DefaultXGBoostAlgoModeInferenceHandler:\n\n def default_model_fn(self, model_dir):\n \"\"\"Load a model. For XGBoost Framework, a default function to load a model is not provided. Users should provide customized model_fn() in script. Args: model_dir: a directory where model is saved. Returns: A XGBoost model. XGBoost model format type.\"\"\"\n <|body_0|>\n\n def default_input_fn(self, input_data, input_content_type):\n \"\"\"Take request data and de-serializes the data into an object for prediction. When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server, the model server receives two pieces of information: - The request Content-Type, for example \"application/json\" - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size. The input_fn is responsible to take the request data and pre-process it before prediction. Args: input_data (obj): the request data. input_content_type (str): the request Content-Type. XGBoost accepts CSV, LIBSVM, and RECORDIO-PROTOBUF. Returns: (obj): data ready for prediction. For XGBoost, this defaults to DMatrix.\"\"\"\n <|body_1|>\n\n def default_predict_fn(self, data, model):\n \"\"\"A default predict_fn for XGBooost Framework. Calls a model on data deserialized in input_fn. Args: data: input data (DMatrix) for prediction deserialized by input_fn and data content type model: XGBoost model loaded in memory by model_fn, and xgboost model format Returns: a prediction\"\"\"\n <|body_2|>\n\n def default_output_fn(self, prediction, accept):\n \"\"\"Return encoded prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: encoded response for MMS to return to client\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n try:\n booster, format = serve_utils.get_loaded_booster(model_dir, serve_utils.is_ensemble_enabled())\n except Exception as e:\n raise ModelLoadInferenceError('Unable to load model: {}'.format(str(e)))\n return (booster, format)\n<|end_body_0|>\n\n<|body_start_1|>\n if len(input_data) == 0:\n raise NoContentInferenceError()\n dtest, content_type = serve_utils.parse_content_data(input_data, input_content_type)\n return (dtest, content_type)\n<|end_body_1|>\n\n<|body_start_2|>\n booster, model_format = model\n dtest, content_type = data\n try:\n return serve_utils.predict(booster, model_format, dtest, content_type)\n except Exception as e:\n raise BadRequestInferenceError(str(e))\n<|end_body_2|>\n\n<|body_start_3|>\n accept_type = accept.lower()\n try:\n if accept_type == content_types.CSV or accept_type == 'csv':\n if SAGEMAKER_BATCH:\n return_data = '\\n'.join(map(str, prediction.tolist())) + '\\n'\n else:\n return_data = ','.join(map(str, prediction.tolist()))\n encoded_prediction = return_data.encode('utf-8')\n elif accept_type == content_types.JSON or accept_type == 'json':\n encoded_prediction = encoder.encode(prediction, accept_type)\n else:\n raise ValueError(\"{} is not an accepted Accept type. Please choose one of the following: ['{}', '{}'].\".format(accept, content_types.CSV, content_types.JSON))\n except Exception as e:\n raise UnsupportedMediaTypeInferenceError('Encoding to accept type {} failed with exception: {}'.format(accept, e))\n return encoded_prediction\n<|end_body_3|>\n", "revision_id": "d2b7e83038956e158d2b07c809026a8ffb2e832c", "skeleton": "<|skeleton|>\nclass DefaultXGBoostAlgoModeInferenceHandler:\n\n def default_model_fn(self, model_dir):\n \"\"\"Load a model. For XGBoost Framework, a default function to load a model is not provided. Users should provide customized model_fn() in script. Args: model_dir: a directory where model is saved. Returns: A XGBoost model. XGBoost model format type.\"\"\"\n <|body_0|>\n\n def default_input_fn(self, input_data, input_content_type):\n \"\"\"Take request data and de-serializes the data into an object for prediction. When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server, the model server receives two pieces of information: - The request Content-Type, for example \"application/json\" - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size. The input_fn is responsible to take the request data and pre-process it before prediction. Args: input_data (obj): the request data. input_content_type (str): the request Content-Type. XGBoost accepts CSV, LIBSVM, and RECORDIO-PROTOBUF. Returns: (obj): data ready for prediction. For XGBoost, this defaults to DMatrix.\"\"\"\n <|body_1|>\n\n def default_predict_fn(self, data, model):\n \"\"\"A default predict_fn for XGBooost Framework. Calls a model on data deserialized in input_fn. Args: data: input data (DMatrix) for prediction deserialized by input_fn and data content type model: XGBoost model loaded in memory by model_fn, and xgboost model format Returns: a prediction\"\"\"\n <|body_2|>\n\n def default_output_fn(self, prediction, accept):\n \"\"\"Return encoded prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: encoded response for MMS to return to client\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DefaultXGBoostAlgoModeInferenceHandler:\n def default_model_fn(self, model_dir):\n \"\"\"Load a model. For XGBoost Framework, a default function to load a model is not provided. Users should provide customized model_fn() in script. Args: model_dir: a directory where model is saved. Returns: A XGBoost model. XGBoost model format type.\"\"\"\n try:\n booster, format = serve_utils.get_loaded_booster(model_dir, serve_utils.is_ensemble_enabled())\n except Exception as e:\n raise ModelLoadInferenceError('Unable to load model: {}'.format(str(e)))\n return (booster, format)\n\n def default_input_fn(self, input_data, input_content_type):\n \"\"\"Take request data and de-serializes the data into an object for prediction. When an InvokeEndpoint operation is made against an Endpoint running SageMaker model server, the model server receives two pieces of information: - The request Content-Type, for example \"application/json\" - The request data, which is at most 5 MB (5 * 1024 * 1024 bytes) in size. The input_fn is responsible to take the request data and pre-process it before prediction. Args: input_data (obj): the request data. input_content_type (str): the request Content-Type. XGBoost accepts CSV, LIBSVM, and RECORDIO-PROTOBUF. Returns: (obj): data ready for prediction. For XGBoost, this defaults to DMatrix.\"\"\"\n if len(input_data) == 0:\n raise NoContentInferenceError()\n dtest, content_type = serve_utils.parse_content_data(input_data, input_content_type)\n return (dtest, content_type)\n\n def default_predict_fn(self, data, model):\n \"\"\"A default predict_fn for XGBooost Framework. Calls a model on data deserialized in input_fn. Args: data: input data (DMatrix) for prediction deserialized by input_fn and data content type model: XGBoost model loaded in memory by model_fn, and xgboost model format Returns: a prediction\"\"\"\n booster, model_format = model\n dtest, content_type = data\n try:\n return serve_utils.predict(booster, model_format, dtest, content_type)\n except Exception as e:\n raise BadRequestInferenceError(str(e))\n\n def default_output_fn(self, prediction, accept):\n \"\"\"Return encoded prediction for the response. Args: prediction (obj): prediction returned by predict_fn . accept (str): accept content-type expected by the client. Returns: encoded response for MMS to return to client\"\"\"\n accept_type = accept.lower()\n try:\n if accept_type == content_types.CSV or accept_type == 'csv':\n if SAGEMAKER_BATCH:\n return_data = '\\n'.join(map(str, prediction.tolist())) + '\\n'\n else:\n return_data = ','.join(map(str, prediction.tolist()))\n encoded_prediction = return_data.encode('utf-8')\n elif accept_type == content_types.JSON or accept_type == 'json':\n encoded_prediction = encoder.encode(prediction, accept_type)\n else:\n raise ValueError(\"{} is not an accepted Accept type. Please choose one of the following: ['{}', '{}'].\".format(accept, content_types.CSV, content_types.JSON))\n except Exception as e:\n raise UnsupportedMediaTypeInferenceError('Encoding to accept type {} failed with exception: {}'.format(accept, e))\n return encoded_prediction\n", "source": "the_stack_v2_python_sparse", "source_path": "src/sagemaker_xgboost_container/algorithm_mode/handler_service.py", "source_repo": "aws/sagemaker-xgboost-container", "split": "test", "star_events_count": 107} {"blob_id": "8aeff0b9ef4ccbf9a8a6c374cb3566705965f099", "bodies": ["self.num_filter = num_filter\nself.window_size = window_size\nself.activation = activation\nself.dropout = dropout\nself.regularizer = regularizer\nself.random_seed = random_seed\nself.trainable = trainable\nself.scope = scope\nself.device_spec = get_device_spec(default_gpu_id, num_gpus)\nwith tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n weight_initializer = create_variable_initializer('glorot_uniform', self.m_seed)\n bias_initializer = create_variable_initializer('zero')\n transform_activation = create_activation_function(self.activation)\n gate_activation = create_activation_function('sigmoid')\n self.transform_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=transform_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.gate_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=gate_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed)", "with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n transform, _ = self.dropout_layer(self.transform_layer(input_data), input_mask)\n gate = self.gate_layer(input_data)\n output_highway = transform * gate + input_data * (1 - gate)\n output_mask = input_mask\nreturn (output_highway, output_mask)"], "bodies_text": "<|body_start_0|>\n self.num_filter = num_filter\n self.window_size = window_size\n self.activation = activation\n self.dropout = dropout\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n self.device_spec = get_device_spec(default_gpu_id, num_gpus)\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n weight_initializer = create_variable_initializer('glorot_uniform', self.m_seed)\n bias_initializer = create_variable_initializer('zero')\n transform_activation = create_activation_function(self.activation)\n gate_activation = create_activation_function('sigmoid')\n self.transform_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=transform_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.gate_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=gate_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed)\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n transform, _ = self.dropout_layer(self.transform_layer(input_data), input_mask)\n gate = self.gate_layer(input_data)\n output_highway = transform * gate + input_data * (1 - gate)\n output_mask = input_mask\n return (output_highway, output_mask)\n<|end_body_1|>\n", "class_docstring": "convolutional highway layer", "class_name": "ConvHighway", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConvHighway:\n \"\"\"convolutional highway layer\"\"\"\n\n def __init__(self, num_filter, window_size, activation, dropout, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope='conv_highway'):\n \"\"\"initialize convolutional highway layer\"\"\"\n <|body_0|>\n\n def __call__(self, input_data, input_mask):\n \"\"\"call convolutional highway layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_filter = num_filter\n self.window_size = window_size\n self.activation = activation\n self.dropout = dropout\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n self.device_spec = get_device_spec(default_gpu_id, num_gpus)\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n weight_initializer = create_variable_initializer('glorot_uniform', self.m_seed)\n bias_initializer = create_variable_initializer('zero')\n transform_activation = create_activation_function(self.activation)\n gate_activation = create_activation_function('sigmoid')\n self.transform_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=transform_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.gate_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=gate_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed)\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n transform, _ = self.dropout_layer(self.transform_layer(input_data), input_mask)\n gate = self.gate_layer(input_data)\n output_highway = transform * gate + input_data * (1 - gate)\n output_mask = input_mask\n return (output_highway, output_mask)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000453", "length_bytes": 9944, "license_type": "permissive", "methods": [{"docstring": "initialize convolutional highway layer", "name": "__init__", "signature": "def __init__(self, num_filter, window_size, activation, dropout, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope='conv_highway')"}, {"docstring": "call convolutional highway layer", "name": "__call__", "signature": "def __call__(self, input_data, input_mask)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_038427", "prompt": "Implement the Python class `ConvHighway` described below.\n\nClass description:\nconvolutional highway layer\n\nMethod signatures and docstrings:\n- def __init__(self, num_filter, window_size, activation, dropout, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope='conv_highway'): initialize convolutional highway layer\n- def __call__(self, input_data, input_mask): call convolutional highway layer", "prompted_full_text": "Implement the Python class `ConvHighway` described below.\n\nClass description:\nconvolutional highway layer\n\nMethod signatures and docstrings:\n- def __init__(self, num_filter, window_size, activation, dropout, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope='conv_highway'): initialize convolutional highway layer\n- def __call__(self, input_data, input_mask): call convolutional highway layer\n\n<|skeleton|>\nclass ConvHighway:\n \"\"\"convolutional highway layer\"\"\"\n\n def __init__(self, num_filter, window_size, activation, dropout, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope='conv_highway'):\n \"\"\"initialize convolutional highway layer\"\"\"\n <|body_0|>\n\n def __call__(self, input_data, input_mask):\n \"\"\"call convolutional highway layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.num_filter = num_filter\n self.window_size = window_size\n self.activation = activation\n self.dropout = dropout\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n self.device_spec = get_device_spec(default_gpu_id, num_gpus)\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n weight_initializer = create_variable_initializer('glorot_uniform', self.m_seed)\n bias_initializer = create_variable_initializer('zero')\n transform_activation = create_activation_function(self.activation)\n gate_activation = create_activation_function('sigmoid')\n self.transform_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=transform_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.gate_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=gate_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed)\n<|end_body_0|>\n\n<|body_start_1|>\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n transform, _ = self.dropout_layer(self.transform_layer(input_data), input_mask)\n gate = self.gate_layer(input_data)\n output_highway = transform * gate + input_data * (1 - gate)\n output_mask = input_mask\n return (output_highway, output_mask)\n<|end_body_1|>\n", "revision_id": "05fcbec15e359e3db86af6c3798c13be8a6c58ee", "skeleton": "<|skeleton|>\nclass ConvHighway:\n \"\"\"convolutional highway layer\"\"\"\n\n def __init__(self, num_filter, window_size, activation, dropout, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope='conv_highway'):\n \"\"\"initialize convolutional highway layer\"\"\"\n <|body_0|>\n\n def __call__(self, input_data, input_mask):\n \"\"\"call convolutional highway layer\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ConvHighway:\n \"\"\"convolutional highway layer\"\"\"\n\n def __init__(self, num_filter, window_size, activation, dropout, num_gpus=1, default_gpu_id=0, regularizer=None, random_seed=0, trainable=True, scope='conv_highway'):\n \"\"\"initialize convolutional highway layer\"\"\"\n self.num_filter = num_filter\n self.window_size = window_size\n self.activation = activation\n self.dropout = dropout\n self.regularizer = regularizer\n self.random_seed = random_seed\n self.trainable = trainable\n self.scope = scope\n self.device_spec = get_device_spec(default_gpu_id, num_gpus)\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n weight_initializer = create_variable_initializer('glorot_uniform', self.m_seed)\n bias_initializer = create_variable_initializer('zero')\n transform_activation = create_activation_function(self.activation)\n gate_activation = create_activation_function('sigmoid')\n self.transform_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=transform_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.gate_layer = tf.layers.Conv1D(filters=self.num_filter, kernel_size=window_size, strides=1, padding='SAME', activation=gate_activation, use_bias=True, kernel_initializer=weight_initializer, bias_initializer=bias_initializer, kernel_regularizer=self.regularizer, bias_regularizer=self.regularizer, trainable=trainable)\n self.dropout_layer = Dropout(rate=self.dropout, num_gpus=num_gpus, default_gpu_id=default_gpu_id, random_seed=self.random_seed)\n\n def __call__(self, input_data, input_mask):\n \"\"\"call convolutional highway layer\"\"\"\n with tf.variable_scope(self.scope, reuse=tf.AUTO_REUSE), tf.device(self.device_spec):\n transform, _ = self.dropout_layer(self.transform_layer(input_data), input_mask)\n gate = self.gate_layer(input_data)\n output_highway = transform * gate + input_data * (1 - gate)\n output_mask = input_mask\n return (output_highway, output_mask)\n", "source": "the_stack_v2_python_sparse", "source_path": "sequence_labeling/layer/highway.py", "source_repo": "stevezheng23/sequence_labeling_tf", "split": "test", "star_events_count": 18} {"blob_id": "970502a1c131a73b1f53c8e25b312c7a28af5410", "bodies": ["forest_predictions = self._base_estimator_predictions(X)\nif self._models_parameters.normalize_D:\n forest_predictions /= self._forest_norms\nreturn self._omp.predict(forest_predictions, forest_size)", "forest_predictions = np.array([tree.predict(X) for tree in self._base_forest_estimator.estimators_])\nif forest_size is not None:\n weights = self._omp.get_coef(forest_size)\n select_trees = np.mean(forest_predictions[weights != 0], axis=0)\n return select_trees\nelse:\n lst_predictions = []\n for sol in self._omp.get_coef():\n lst_predictions.append(np.mean(forest_predictions[sol != 0], axis=0))\n return lst_predictions", "if forest_size is not None:\n predictions = self.predict(X, forest_size)\n return np.mean(np.square(predictions - y))\nelse:\n predictions = self.predict(X)\n lst_scores = []\n for pred in predictions:\n lst_scores.append(np.mean(np.square(pred - y)))\n return lst_scores"], "bodies_text": "<|body_start_0|>\n forest_predictions = self._base_estimator_predictions(X)\n if self._models_parameters.normalize_D:\n forest_predictions /= self._forest_norms\n return self._omp.predict(forest_predictions, forest_size)\n<|end_body_0|>\n\n<|body_start_1|>\n forest_predictions = np.array([tree.predict(X) for tree in self._base_forest_estimator.estimators_])\n if forest_size is not None:\n weights = self._omp.get_coef(forest_size)\n select_trees = np.mean(forest_predictions[weights != 0], axis=0)\n return select_trees\n else:\n lst_predictions = []\n for sol in self._omp.get_coef():\n lst_predictions.append(np.mean(forest_predictions[sol != 0], axis=0))\n return lst_predictions\n<|end_body_1|>\n\n<|body_start_2|>\n if forest_size is not None:\n predictions = self.predict(X, forest_size)\n return np.mean(np.square(predictions - y))\n else:\n predictions = self.predict(X)\n lst_scores = []\n for pred in predictions:\n lst_scores.append(np.mean(np.square(pred - y)))\n return lst_scores\n<|end_body_2|>\n", "class_docstring": "", "class_name": "NonNegativeOmpForestRegressor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NonNegativeOmpForestRegressor:\n\n def predict(self, X, forest_size=None):\n \"\"\"Make prediction. If forest_size is None return the list of predictions of all intermediate solutions :param X: :return:\"\"\"\n <|body_0|>\n\n def predict_no_weights(self, X, forest_size=None):\n \"\"\"Make a prediction of the selected trees but without weight. If forest_size is None return the list of unweighted predictions of all intermediate solutions. :param X: some data to apply the forest to :return: a np.array of the predictions of the trees selected by OMP without applying the weight\"\"\"\n <|body_1|>\n\n def score(self, X, y, forest_size=None):\n \"\"\"Evaluate OMPForestClassifer on (`X`, `y`). if Idx_prediction is None return the score of all sub forest.` :param X: :param y: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n forest_predictions = self._base_estimator_predictions(X)\n if self._models_parameters.normalize_D:\n forest_predictions /= self._forest_norms\n return self._omp.predict(forest_predictions, forest_size)\n<|end_body_0|>\n\n<|body_start_1|>\n forest_predictions = np.array([tree.predict(X) for tree in self._base_forest_estimator.estimators_])\n if forest_size is not None:\n weights = self._omp.get_coef(forest_size)\n select_trees = np.mean(forest_predictions[weights != 0], axis=0)\n return select_trees\n else:\n lst_predictions = []\n for sol in self._omp.get_coef():\n lst_predictions.append(np.mean(forest_predictions[sol != 0], axis=0))\n return lst_predictions\n<|end_body_1|>\n\n<|body_start_2|>\n if forest_size is not None:\n predictions = self.predict(X, forest_size)\n return np.mean(np.square(predictions - y))\n else:\n predictions = self.predict(X)\n lst_scores = []\n for pred in predictions:\n lst_scores.append(np.mean(np.square(pred - y)))\n return lst_scores\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000454", "length_bytes": 5071, "license_type": "permissive", "methods": [{"docstring": "Make prediction. If forest_size is None return the list of predictions of all intermediate solutions :param X: :return:", "name": "predict", "signature": "def predict(self, X, forest_size=None)"}, {"docstring": "Make a prediction of the selected trees but without weight. If forest_size is None return the list of unweighted predictions of all intermediate solutions. :param X: some data to apply the forest to :return: a np.array of the predictions of the trees selected by OMP without applying the weight", "name": "predict_no_weights", "signature": "def predict_no_weights(self, X, forest_size=None)"}, {"docstring": "Evaluate OMPForestClassifer on (`X`, `y`). if Idx_prediction is None return the score of all sub forest.` :param X: :param y: :return:", "name": "score", "signature": "def score(self, X, y, forest_size=None)"}], "n_methods": 3, "prompt": "Implement the Python class `NonNegativeOmpForestRegressor` described below.\n\nClass description:\nImplement the NonNegativeOmpForestRegressor class.\n\nMethod signatures and docstrings:\n- def predict(self, X, forest_size=None): Make prediction. If forest_size is None return the list of predictions of all intermediate solutions :param X: :return:\n- def predict_no_weights(self, X, forest_size=None): Make a prediction of the selected trees but without weight. If forest_size is None return the list of unweighted predictions of all intermediate solutions. :param X: some data to apply the forest to :return: a np.array of the predictions of the trees selected by OMP without applying the weight\n- def score(self, X, y, forest_size=None): Evaluate OMPForestClassifer on (`X`, `y`). if Idx_prediction is None return the score of all sub forest.` :param X: :param y: :return:", "prompted_full_text": "Implement the Python class `NonNegativeOmpForestRegressor` described below.\n\nClass description:\nImplement the NonNegativeOmpForestRegressor class.\n\nMethod signatures and docstrings:\n- def predict(self, X, forest_size=None): Make prediction. If forest_size is None return the list of predictions of all intermediate solutions :param X: :return:\n- def predict_no_weights(self, X, forest_size=None): Make a prediction of the selected trees but without weight. If forest_size is None return the list of unweighted predictions of all intermediate solutions. :param X: some data to apply the forest to :return: a np.array of the predictions of the trees selected by OMP without applying the weight\n- def score(self, X, y, forest_size=None): Evaluate OMPForestClassifer on (`X`, `y`). if Idx_prediction is None return the score of all sub forest.` :param X: :param y: :return:\n\n<|skeleton|>\nclass NonNegativeOmpForestRegressor:\n\n def predict(self, X, forest_size=None):\n \"\"\"Make prediction. If forest_size is None return the list of predictions of all intermediate solutions :param X: :return:\"\"\"\n <|body_0|>\n\n def predict_no_weights(self, X, forest_size=None):\n \"\"\"Make a prediction of the selected trees but without weight. If forest_size is None return the list of unweighted predictions of all intermediate solutions. :param X: some data to apply the forest to :return: a np.array of the predictions of the trees selected by OMP without applying the weight\"\"\"\n <|body_1|>\n\n def score(self, X, y, forest_size=None):\n \"\"\"Evaluate OMPForestClassifer on (`X`, `y`). if Idx_prediction is None return the score of all sub forest.` :param X: :param y: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n forest_predictions = self._base_estimator_predictions(X)\n if self._models_parameters.normalize_D:\n forest_predictions /= self._forest_norms\n return self._omp.predict(forest_predictions, forest_size)\n<|end_body_0|>\n\n<|body_start_1|>\n forest_predictions = np.array([tree.predict(X) for tree in self._base_forest_estimator.estimators_])\n if forest_size is not None:\n weights = self._omp.get_coef(forest_size)\n select_trees = np.mean(forest_predictions[weights != 0], axis=0)\n return select_trees\n else:\n lst_predictions = []\n for sol in self._omp.get_coef():\n lst_predictions.append(np.mean(forest_predictions[sol != 0], axis=0))\n return lst_predictions\n<|end_body_1|>\n\n<|body_start_2|>\n if forest_size is not None:\n predictions = self.predict(X, forest_size)\n return np.mean(np.square(predictions - y))\n else:\n predictions = self.predict(X)\n lst_scores = []\n for pred in predictions:\n lst_scores.append(np.mean(np.square(pred - y)))\n return lst_scores\n<|end_body_2|>\n", "revision_id": "64ba63c01bd04f4f959d18aff27e245d8fff3403", "skeleton": "<|skeleton|>\nclass NonNegativeOmpForestRegressor:\n\n def predict(self, X, forest_size=None):\n \"\"\"Make prediction. If forest_size is None return the list of predictions of all intermediate solutions :param X: :return:\"\"\"\n <|body_0|>\n\n def predict_no_weights(self, X, forest_size=None):\n \"\"\"Make a prediction of the selected trees but without weight. If forest_size is None return the list of unweighted predictions of all intermediate solutions. :param X: some data to apply the forest to :return: a np.array of the predictions of the trees selected by OMP without applying the weight\"\"\"\n <|body_1|>\n\n def score(self, X, y, forest_size=None):\n \"\"\"Evaluate OMPForestClassifer on (`X`, `y`). if Idx_prediction is None return the score of all sub forest.` :param X: :param y: :return:\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NonNegativeOmpForestRegressor:\n def predict(self, X, forest_size=None):\n \"\"\"Make prediction. If forest_size is None return the list of predictions of all intermediate solutions :param X: :return:\"\"\"\n forest_predictions = self._base_estimator_predictions(X)\n if self._models_parameters.normalize_D:\n forest_predictions /= self._forest_norms\n return self._omp.predict(forest_predictions, forest_size)\n\n def predict_no_weights(self, X, forest_size=None):\n \"\"\"Make a prediction of the selected trees but without weight. If forest_size is None return the list of unweighted predictions of all intermediate solutions. :param X: some data to apply the forest to :return: a np.array of the predictions of the trees selected by OMP without applying the weight\"\"\"\n forest_predictions = np.array([tree.predict(X) for tree in self._base_forest_estimator.estimators_])\n if forest_size is not None:\n weights = self._omp.get_coef(forest_size)\n select_trees = np.mean(forest_predictions[weights != 0], axis=0)\n return select_trees\n else:\n lst_predictions = []\n for sol in self._omp.get_coef():\n lst_predictions.append(np.mean(forest_predictions[sol != 0], axis=0))\n return lst_predictions\n\n def score(self, X, y, forest_size=None):\n \"\"\"Evaluate OMPForestClassifer on (`X`, `y`). if Idx_prediction is None return the score of all sub forest.` :param X: :param y: :return:\"\"\"\n if forest_size is not None:\n predictions = self.predict(X, forest_size)\n return np.mean(np.square(predictions - y))\n else:\n predictions = self.predict(X)\n lst_scores = []\n for pred in predictions:\n lst_scores.append(np.mean(np.square(pred - y)))\n return lst_scores\n", "source": "the_stack_v2_python_sparse", "source_path": "code/bolsonaro/models/nn_omp_forest_regressor.py", "source_repo": "swasun/RFOMT", "split": "test", "star_events_count": 2} {"blob_id": "3c1a63bb7f983fe9682c749ecd2c5f7c8cafbec9", "bodies": ["if scale not in ('linear', 'log'):\n raise ValueError('invalid parameter scale: %s' % scale)\nself.name = name\nself.min_value = min_value\nself.max_value = max_value\nself.scale = scale", "if self.scale == 'linear':\n return random.uniform(self.min_value, self.max_value)\nelse:\n log_min_value = math.log(self.min_value)\n log_max_value = math.log(self.max_value)\n return math.exp(random.uniform(log_min_value, log_max_value))"], "bodies_text": "<|body_start_0|>\n if scale not in ('linear', 'log'):\n raise ValueError('invalid parameter scale: %s' % scale)\n self.name = name\n self.min_value = min_value\n self.max_value = max_value\n self.scale = scale\n<|end_body_0|>\n\n<|body_start_1|>\n if self.scale == 'linear':\n return random.uniform(self.min_value, self.max_value)\n else:\n log_min_value = math.log(self.min_value)\n log_max_value = math.log(self.max_value)\n return math.exp(random.uniform(log_min_value, log_max_value))\n<|end_body_1|>\n", "class_docstring": "An audio transform parameter with min and max value.", "class_name": "AudioTransformParameter", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AudioTransformParameter:\n \"\"\"An audio transform parameter with min and max value.\"\"\"\n\n def __init__(self, name, min_value, max_value, scale):\n \"\"\"Initialize an AudioTransformParameter. Args: name: The name of the parameter. Should be the same as the name of the parameter passed to sox. min_value: The minimum value of the parameter, a float. max_value: The maximum value of the parameter, a float. scale: 'linear' or 'log', the scale with which to sample the parameter value. Raises: ValueError: If `scale` is not 'linear' or 'log'.\"\"\"\n <|body_0|>\n\n def sample(self):\n \"\"\"Sample the parameter, returning a random value in its range. Returns: A value drawn uniformly at random between `min_value` and `max_value`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if scale not in ('linear', 'log'):\n raise ValueError('invalid parameter scale: %s' % scale)\n self.name = name\n self.min_value = min_value\n self.max_value = max_value\n self.scale = scale\n<|end_body_0|>\n\n<|body_start_1|>\n if self.scale == 'linear':\n return random.uniform(self.min_value, self.max_value)\n else:\n log_min_value = math.log(self.min_value)\n log_max_value = math.log(self.max_value)\n return math.exp(random.uniform(log_min_value, log_max_value))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000455", "length_bytes": 9154, "license_type": "permissive", "methods": [{"docstring": "Initialize an AudioTransformParameter. Args: name: The name of the parameter. Should be the same as the name of the parameter passed to sox. min_value: The minimum value of the parameter, a float. max_value: The maximum value of the parameter, a float. scale: 'linear' or 'log', the scale with which to sample the parameter value. Raises: ValueError: If `scale` is not 'linear' or 'log'.", "name": "__init__", "signature": "def __init__(self, name, min_value, max_value, scale)"}, {"docstring": "Sample the parameter, returning a random value in its range. Returns: A value drawn uniformly at random between `min_value` and `max_value`.", "name": "sample", "signature": "def sample(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_010328", "prompt": "Implement the Python class `AudioTransformParameter` described below.\n\nClass description:\nAn audio transform parameter with min and max value.\n\nMethod signatures and docstrings:\n- def __init__(self, name, min_value, max_value, scale): Initialize an AudioTransformParameter. Args: name: The name of the parameter. Should be the same as the name of the parameter passed to sox. min_value: The minimum value of the parameter, a float. max_value: The maximum value of the parameter, a float. scale: 'linear' or 'log', the scale with which to sample the parameter value. Raises: ValueError: If `scale` is not 'linear' or 'log'.\n- def sample(self): Sample the parameter, returning a random value in its range. Returns: A value drawn uniformly at random between `min_value` and `max_value`.", "prompted_full_text": "Implement the Python class `AudioTransformParameter` described below.\n\nClass description:\nAn audio transform parameter with min and max value.\n\nMethod signatures and docstrings:\n- def __init__(self, name, min_value, max_value, scale): Initialize an AudioTransformParameter. Args: name: The name of the parameter. Should be the same as the name of the parameter passed to sox. min_value: The minimum value of the parameter, a float. max_value: The maximum value of the parameter, a float. scale: 'linear' or 'log', the scale with which to sample the parameter value. Raises: ValueError: If `scale` is not 'linear' or 'log'.\n- def sample(self): Sample the parameter, returning a random value in its range. Returns: A value drawn uniformly at random between `min_value` and `max_value`.\n\n<|skeleton|>\nclass AudioTransformParameter:\n \"\"\"An audio transform parameter with min and max value.\"\"\"\n\n def __init__(self, name, min_value, max_value, scale):\n \"\"\"Initialize an AudioTransformParameter. Args: name: The name of the parameter. Should be the same as the name of the parameter passed to sox. min_value: The minimum value of the parameter, a float. max_value: The maximum value of the parameter, a float. scale: 'linear' or 'log', the scale with which to sample the parameter value. Raises: ValueError: If `scale` is not 'linear' or 'log'.\"\"\"\n <|body_0|>\n\n def sample(self):\n \"\"\"Sample the parameter, returning a random value in its range. Returns: A value drawn uniformly at random between `min_value` and `max_value`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if scale not in ('linear', 'log'):\n raise ValueError('invalid parameter scale: %s' % scale)\n self.name = name\n self.min_value = min_value\n self.max_value = max_value\n self.scale = scale\n<|end_body_0|>\n\n<|body_start_1|>\n if self.scale == 'linear':\n return random.uniform(self.min_value, self.max_value)\n else:\n log_min_value = math.log(self.min_value)\n log_max_value = math.log(self.max_value)\n return math.exp(random.uniform(log_min_value, log_max_value))\n<|end_body_1|>\n", "revision_id": "548dc4e2e6a8e3ac65e1921bd94fe589d661d47b", "skeleton": "<|skeleton|>\nclass AudioTransformParameter:\n \"\"\"An audio transform parameter with min and max value.\"\"\"\n\n def __init__(self, name, min_value, max_value, scale):\n \"\"\"Initialize an AudioTransformParameter. Args: name: The name of the parameter. Should be the same as the name of the parameter passed to sox. min_value: The minimum value of the parameter, a float. max_value: The maximum value of the parameter, a float. scale: 'linear' or 'log', the scale with which to sample the parameter value. Raises: ValueError: If `scale` is not 'linear' or 'log'.\"\"\"\n <|body_0|>\n\n def sample(self):\n \"\"\"Sample the parameter, returning a random value in its range. Returns: A value drawn uniformly at random between `min_value` and `max_value`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AudioTransformParameter:\n \"\"\"An audio transform parameter with min and max value.\"\"\"\n\n def __init__(self, name, min_value, max_value, scale):\n \"\"\"Initialize an AudioTransformParameter. Args: name: The name of the parameter. Should be the same as the name of the parameter passed to sox. min_value: The minimum value of the parameter, a float. max_value: The maximum value of the parameter, a float. scale: 'linear' or 'log', the scale with which to sample the parameter value. Raises: ValueError: If `scale` is not 'linear' or 'log'.\"\"\"\n if scale not in ('linear', 'log'):\n raise ValueError('invalid parameter scale: %s' % scale)\n self.name = name\n self.min_value = min_value\n self.max_value = max_value\n self.scale = scale\n\n def sample(self):\n \"\"\"Sample the parameter, returning a random value in its range. Returns: A value drawn uniformly at random between `min_value` and `max_value`.\"\"\"\n if self.scale == 'linear':\n return random.uniform(self.min_value, self.max_value)\n else:\n log_min_value = math.log(self.min_value)\n log_max_value = math.log(self.max_value)\n return math.exp(random.uniform(log_min_value, log_max_value))\n", "source": "the_stack_v2_python_sparse", "source_path": "magenta/models/onsets_frames_transcription/audio_transform.py", "source_repo": "magenta/magenta", "split": "test", "star_events_count": 4142} {"blob_id": "e864bffcad4ce5cf8ab33a706a3beb9ac7a936b0", "bodies": ["q = set([(r, c)])\nP = [[0 for _ in range(N)] for _ in range(N)]\nP[r][c] = 1\nk = 0\nwhile k < K:\n k += 1\n cur_q = set()\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.add((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\nreturn sum([P[i][j] for i in range(N) for j in range(N)])", "q = [(r, c)]\nP = [[0 for _ in range(N)] for _ in range(N)]\nP[r][c] = 1\nk = 0\nwhile k < K:\n k += 1\n cur_q = []\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.append((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\nreturn sum([P[i][j] for i in range(N) for j in range(N)])"], "bodies_text": "<|body_start_0|>\n q = set([(r, c)])\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = set()\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.add((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n<|end_body_0|>\n\n<|body_start_1|>\n q = [(r, c)]\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = []\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.append((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def knightProbability(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step with memory, it is considered dp\"\"\"\n <|body_0|>\n\n def knightProbability_error(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n q = set([(r, c)])\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = set()\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.add((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n<|end_body_0|>\n\n<|body_start_1|>\n q = [(r, c)]\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = []\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.append((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000456", "length_bytes": 3087, "license_type": "no_license", "methods": [{"docstring": "brute force K step with memory, it is considered dp", "name": "knightProbability", "signature": "def knightProbability(self, N: int, K: int, r: int, c: int) -> float"}, {"docstring": "brute force K step", "name": "knightProbability_error", "signature": "def knightProbability_error(self, N: int, K: int, r: int, c: int) -> float"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049615", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def knightProbability(self, N: int, K: int, r: int, c: int) -> float: brute force K step with memory, it is considered dp\n- def knightProbability_error(self, N: int, K: int, r: int, c: int) -> float: brute force K step", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def knightProbability(self, N: int, K: int, r: int, c: int) -> float: brute force K step with memory, it is considered dp\n- def knightProbability_error(self, N: int, K: int, r: int, c: int) -> float: brute force K step\n\n<|skeleton|>\nclass Solution:\n\n def knightProbability(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step with memory, it is considered dp\"\"\"\n <|body_0|>\n\n def knightProbability_error(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n q = set([(r, c)])\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = set()\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.add((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n<|end_body_0|>\n\n<|body_start_1|>\n q = [(r, c)]\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = []\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.append((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n<|end_body_1|>\n", "revision_id": "929dde1723fb2f54870c8a9badc80fc23e8400d3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def knightProbability(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step with memory, it is considered dp\"\"\"\n <|body_0|>\n\n def knightProbability_error(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def knightProbability(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step with memory, it is considered dp\"\"\"\n q = set([(r, c)])\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = set()\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.add((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n\n def knightProbability_error(self, N: int, K: int, r: int, c: int) -> float:\n \"\"\"brute force K step\"\"\"\n q = [(r, c)]\n P = [[0 for _ in range(N)] for _ in range(N)]\n P[r][c] = 1\n k = 0\n while k < K:\n k += 1\n cur_q = []\n cur_P = [[0 for _ in range(N)] for _ in range(N)]\n for i, j in q:\n for di, dj in dirs:\n I = i + di\n J = j + dj\n if 0 <= I < N and 0 <= J < N:\n cur_q.append((I, J))\n cur_P[I][J] += P[i][j] * 1 / 8\n q = cur_q\n P = cur_P\n return sum([P[i][j] for i in range(N) for j in range(N)])\n", "source": "the_stack_v2_python_sparse", "source_path": "_algorithms_challenges/leetcode/LeetCode/688 Knight Probability in Chessboard.py", "source_repo": "syurskyi/Algorithms_and_Data_Structure", "split": "test", "star_events_count": 4} {"blob_id": "10f5742e589b082d55e1630da084693b303a3e57", "bodies": ["builds = self.get(self.url)\nself.assertEqual(len(builds.data), 5)\nbuilds = self.get(self.url, data={'active': True})\nself.assertEqual(len(builds.data), 1)\nbuilds = self.get(self.url, data={'status': BuildStatus.COMPLETE.value})\nself.assertEqual(len(builds.data), 4)\nbuilds = self.get(self.url, data={'overdue': False})\nself.assertEqual(len(builds.data), 5)\nbuilds = self.get(self.url, data={'overdue': True})\nself.assertEqual(len(builds.data), 0)", "in_the_past = datetime.now().date() - timedelta(days=50)\npart = Part.objects.get(pk=50)\nBuild.objects.create(part=part, reference='BO-0006', quantity=10, title='Just some thing', status=BuildStatus.PRODUCTION.value, target_date=in_the_past)\nresponse = self.get(self.url, data={'overdue': True})\nbuilds = response.data\nself.assertEqual(len(builds), 1)", "parent = Build.objects.get(pk=5)\npart = Part.objects.get(pk=50)\nn = Build.objects.count()\nfor i in range(5):\n Build.objects.create(part=part, quantity=10, reference=f'BO-{i + 10}', title=f'Sub build {i}', parent=parent)\nfor ii, sub_build in enumerate(Build.objects.filter(parent=parent)):\n for i in range(3):\n x = ii * 10 + i + 50\n Build.objects.create(part=part, reference=f'BO-{x}', title=f'{sub_build.reference}-00{i}-sub', quantity=40, parent=sub_build)\nself.assertEqual(Build.objects.count(), n + 20)\nBuild.objects.rebuild()\nresponse = self.get(self.url, data={'parent': parent.pk})\nbuilds = response.data\nself.assertEqual(len(builds), 5)\nresponse = self.get(self.url, data={'ancestor': parent.pk})\nbuilds = response.data\nself.assertEqual(len(builds), 20)"], "bodies_text": "<|body_start_0|>\n builds = self.get(self.url)\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'active': True})\n self.assertEqual(len(builds.data), 1)\n builds = self.get(self.url, data={'status': BuildStatus.COMPLETE.value})\n self.assertEqual(len(builds.data), 4)\n builds = self.get(self.url, data={'overdue': False})\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'overdue': True})\n self.assertEqual(len(builds.data), 0)\n<|end_body_0|>\n\n<|body_start_1|>\n in_the_past = datetime.now().date() - timedelta(days=50)\n part = Part.objects.get(pk=50)\n Build.objects.create(part=part, reference='BO-0006', quantity=10, title='Just some thing', status=BuildStatus.PRODUCTION.value, target_date=in_the_past)\n response = self.get(self.url, data={'overdue': True})\n builds = response.data\n self.assertEqual(len(builds), 1)\n<|end_body_1|>\n\n<|body_start_2|>\n parent = Build.objects.get(pk=5)\n part = Part.objects.get(pk=50)\n n = Build.objects.count()\n for i in range(5):\n Build.objects.create(part=part, quantity=10, reference=f'BO-{i + 10}', title=f'Sub build {i}', parent=parent)\n for ii, sub_build in enumerate(Build.objects.filter(parent=parent)):\n for i in range(3):\n x = ii * 10 + i + 50\n Build.objects.create(part=part, reference=f'BO-{x}', title=f'{sub_build.reference}-00{i}-sub', quantity=40, parent=sub_build)\n self.assertEqual(Build.objects.count(), n + 20)\n Build.objects.rebuild()\n response = self.get(self.url, data={'parent': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 5)\n response = self.get(self.url, data={'ancestor': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 20)\n<|end_body_2|>\n", "class_docstring": "Tests for the BuildOrder LIST API.", "class_name": "BuildListTest", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BuildListTest:\n \"\"\"Tests for the BuildOrder LIST API.\"\"\"\n\n def test_get_all_builds(self):\n \"\"\"Retrieve *all* builds via the API.\"\"\"\n <|body_0|>\n\n def test_overdue(self):\n \"\"\"Create a new build, in the past.\"\"\"\n <|body_1|>\n\n def test_sub_builds(self):\n \"\"\"Test the build / sub-build relationship.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n builds = self.get(self.url)\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'active': True})\n self.assertEqual(len(builds.data), 1)\n builds = self.get(self.url, data={'status': BuildStatus.COMPLETE.value})\n self.assertEqual(len(builds.data), 4)\n builds = self.get(self.url, data={'overdue': False})\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'overdue': True})\n self.assertEqual(len(builds.data), 0)\n<|end_body_0|>\n\n<|body_start_1|>\n in_the_past = datetime.now().date() - timedelta(days=50)\n part = Part.objects.get(pk=50)\n Build.objects.create(part=part, reference='BO-0006', quantity=10, title='Just some thing', status=BuildStatus.PRODUCTION.value, target_date=in_the_past)\n response = self.get(self.url, data={'overdue': True})\n builds = response.data\n self.assertEqual(len(builds), 1)\n<|end_body_1|>\n\n<|body_start_2|>\n parent = Build.objects.get(pk=5)\n part = Part.objects.get(pk=50)\n n = Build.objects.count()\n for i in range(5):\n Build.objects.create(part=part, quantity=10, reference=f'BO-{i + 10}', title=f'Sub build {i}', parent=parent)\n for ii, sub_build in enumerate(Build.objects.filter(parent=parent)):\n for i in range(3):\n x = ii * 10 + i + 50\n Build.objects.create(part=part, reference=f'BO-{x}', title=f'{sub_build.reference}-00{i}-sub', quantity=40, parent=sub_build)\n self.assertEqual(Build.objects.count(), n + 20)\n Build.objects.rebuild()\n response = self.get(self.url, data={'parent': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 5)\n response = self.get(self.url, data={'ancestor': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 20)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000457", "length_bytes": 31747, "license_type": "permissive", "methods": [{"docstring": "Retrieve *all* builds via the API.", "name": "test_get_all_builds", "signature": "def test_get_all_builds(self)"}, {"docstring": "Create a new build, in the past.", "name": "test_overdue", "signature": "def test_overdue(self)"}, {"docstring": "Test the build / sub-build relationship.", "name": "test_sub_builds", "signature": "def test_sub_builds(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_016361", "prompt": "Implement the Python class `BuildListTest` described below.\n\nClass description:\nTests for the BuildOrder LIST API.\n\nMethod signatures and docstrings:\n- def test_get_all_builds(self): Retrieve *all* builds via the API.\n- def test_overdue(self): Create a new build, in the past.\n- def test_sub_builds(self): Test the build / sub-build relationship.", "prompted_full_text": "Implement the Python class `BuildListTest` described below.\n\nClass description:\nTests for the BuildOrder LIST API.\n\nMethod signatures and docstrings:\n- def test_get_all_builds(self): Retrieve *all* builds via the API.\n- def test_overdue(self): Create a new build, in the past.\n- def test_sub_builds(self): Test the build / sub-build relationship.\n\n<|skeleton|>\nclass BuildListTest:\n \"\"\"Tests for the BuildOrder LIST API.\"\"\"\n\n def test_get_all_builds(self):\n \"\"\"Retrieve *all* builds via the API.\"\"\"\n <|body_0|>\n\n def test_overdue(self):\n \"\"\"Create a new build, in the past.\"\"\"\n <|body_1|>\n\n def test_sub_builds(self):\n \"\"\"Test the build / sub-build relationship.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n builds = self.get(self.url)\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'active': True})\n self.assertEqual(len(builds.data), 1)\n builds = self.get(self.url, data={'status': BuildStatus.COMPLETE.value})\n self.assertEqual(len(builds.data), 4)\n builds = self.get(self.url, data={'overdue': False})\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'overdue': True})\n self.assertEqual(len(builds.data), 0)\n<|end_body_0|>\n\n<|body_start_1|>\n in_the_past = datetime.now().date() - timedelta(days=50)\n part = Part.objects.get(pk=50)\n Build.objects.create(part=part, reference='BO-0006', quantity=10, title='Just some thing', status=BuildStatus.PRODUCTION.value, target_date=in_the_past)\n response = self.get(self.url, data={'overdue': True})\n builds = response.data\n self.assertEqual(len(builds), 1)\n<|end_body_1|>\n\n<|body_start_2|>\n parent = Build.objects.get(pk=5)\n part = Part.objects.get(pk=50)\n n = Build.objects.count()\n for i in range(5):\n Build.objects.create(part=part, quantity=10, reference=f'BO-{i + 10}', title=f'Sub build {i}', parent=parent)\n for ii, sub_build in enumerate(Build.objects.filter(parent=parent)):\n for i in range(3):\n x = ii * 10 + i + 50\n Build.objects.create(part=part, reference=f'BO-{x}', title=f'{sub_build.reference}-00{i}-sub', quantity=40, parent=sub_build)\n self.assertEqual(Build.objects.count(), n + 20)\n Build.objects.rebuild()\n response = self.get(self.url, data={'parent': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 5)\n response = self.get(self.url, data={'ancestor': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 20)\n<|end_body_2|>\n", "revision_id": "e88a8e99a5f0b201c67a95cba097c729f090d5e2", "skeleton": "<|skeleton|>\nclass BuildListTest:\n \"\"\"Tests for the BuildOrder LIST API.\"\"\"\n\n def test_get_all_builds(self):\n \"\"\"Retrieve *all* builds via the API.\"\"\"\n <|body_0|>\n\n def test_overdue(self):\n \"\"\"Create a new build, in the past.\"\"\"\n <|body_1|>\n\n def test_sub_builds(self):\n \"\"\"Test the build / sub-build relationship.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BuildListTest:\n \"\"\"Tests for the BuildOrder LIST API.\"\"\"\n\n def test_get_all_builds(self):\n \"\"\"Retrieve *all* builds via the API.\"\"\"\n builds = self.get(self.url)\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'active': True})\n self.assertEqual(len(builds.data), 1)\n builds = self.get(self.url, data={'status': BuildStatus.COMPLETE.value})\n self.assertEqual(len(builds.data), 4)\n builds = self.get(self.url, data={'overdue': False})\n self.assertEqual(len(builds.data), 5)\n builds = self.get(self.url, data={'overdue': True})\n self.assertEqual(len(builds.data), 0)\n\n def test_overdue(self):\n \"\"\"Create a new build, in the past.\"\"\"\n in_the_past = datetime.now().date() - timedelta(days=50)\n part = Part.objects.get(pk=50)\n Build.objects.create(part=part, reference='BO-0006', quantity=10, title='Just some thing', status=BuildStatus.PRODUCTION.value, target_date=in_the_past)\n response = self.get(self.url, data={'overdue': True})\n builds = response.data\n self.assertEqual(len(builds), 1)\n\n def test_sub_builds(self):\n \"\"\"Test the build / sub-build relationship.\"\"\"\n parent = Build.objects.get(pk=5)\n part = Part.objects.get(pk=50)\n n = Build.objects.count()\n for i in range(5):\n Build.objects.create(part=part, quantity=10, reference=f'BO-{i + 10}', title=f'Sub build {i}', parent=parent)\n for ii, sub_build in enumerate(Build.objects.filter(parent=parent)):\n for i in range(3):\n x = ii * 10 + i + 50\n Build.objects.create(part=part, reference=f'BO-{x}', title=f'{sub_build.reference}-00{i}-sub', quantity=40, parent=sub_build)\n self.assertEqual(Build.objects.count(), n + 20)\n Build.objects.rebuild()\n response = self.get(self.url, data={'parent': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 5)\n response = self.get(self.url, data={'ancestor': parent.pk})\n builds = response.data\n self.assertEqual(len(builds), 20)\n", "source": "the_stack_v2_python_sparse", "source_path": "InvenTree/build/test_api.py", "source_repo": "inventree/InvenTree", "split": "test", "star_events_count": 3077} {"blob_id": "cf95815c5d2932e5fb958c9be9949a589ebbc3af", "bodies": ["self.idx_var = idx_var\nself.inter = inter\nself.base_purities = base_purities\nself.rule = rule\nself.rule_type = rule_type\nself.rule_mod_sizes = Counter((i[0] for i in self.rule))\nself.rule_size = float(sum(self.rule_mod_sizes.values()))\nself.rule_purities = {k: v / self.rule_size for k, v in self.rule_mod_sizes.iteritems()}\n\ndef z_score(rule_size, rule_purity, base_purity):\n \"\"\"\n Compute the z-score of a rule regarding a particular modality.\n\n Parameters:\n - rule_size, int. (i.e., the number of suject in the rule)\n - rule_purity, float. Rule purity.\n - base_purity, float. Base purity.\n\n return z-score\n \"\"\"\n if np.sqrt(base_purity * (1 - base_purity)) == 0:\n return 0\n else:\n return np.sqrt(rule_size) * (rule_purity - base_purity) / np.sqrt(base_purity * (1 - base_purity))\nself.rule_z_scores = {k: z_score(self.rule_size, self.rule_purities[k], self.base_purities[k]) for k, v in self.rule_purities.iteritems()}\nself.rule_interest_mod = key_with_max_val(self.rule_z_scores)", "rule_mod = str(int(self.rule_interest_mod))\nif self.rule_mod_sizes[self.rule_interest_mod] >= mod_size_threshold[rule_mod] and self.rule_size >= size_threshold[rule_mod] and (self.rule_purities[self.rule_interest_mod] >= purity_threshold[rule_mod]) and (self.rule_z_scores[self.rule_interest_mod] >= z_score_threshold[rule_mod]):\n return True\nelse:\n return False", "if isinstance(self.idx_var, int):\n idx_var_ = [self.idx_var]\n inter_ = [self.inter]\n rule_type_ = [self.rule_type]\nelse:\n idx_var_ = list(self.idx_var)\n inter_ = self.inter\n rule_type_ = self.rule_type\nreturn np.array([idx_var_, inter_, self.rule_size, self.rule_interest_mod, self.rule_mod_sizes[self.rule_interest_mod], self.rule_purities[self.rule_interest_mod], self.rule_z_scores[self.rule_interest_mod], rule_type_])"], "bodies_text": "<|body_start_0|>\n self.idx_var = idx_var\n self.inter = inter\n self.base_purities = base_purities\n self.rule = rule\n self.rule_type = rule_type\n self.rule_mod_sizes = Counter((i[0] for i in self.rule))\n self.rule_size = float(sum(self.rule_mod_sizes.values()))\n self.rule_purities = {k: v / self.rule_size for k, v in self.rule_mod_sizes.iteritems()}\n\n def z_score(rule_size, rule_purity, base_purity):\n \"\"\"\n Compute the z-score of a rule regarding a particular modality.\n\n Parameters:\n - rule_size, int. (i.e., the number of suject in the rule)\n - rule_purity, float. Rule purity.\n - base_purity, float. Base purity.\n\n return z-score\n \"\"\"\n if np.sqrt(base_purity * (1 - base_purity)) == 0:\n return 0\n else:\n return np.sqrt(rule_size) * (rule_purity - base_purity) / np.sqrt(base_purity * (1 - base_purity))\n self.rule_z_scores = {k: z_score(self.rule_size, self.rule_purities[k], self.base_purities[k]) for k, v in self.rule_purities.iteritems()}\n self.rule_interest_mod = key_with_max_val(self.rule_z_scores)\n<|end_body_0|>\n\n<|body_start_1|>\n rule_mod = str(int(self.rule_interest_mod))\n if self.rule_mod_sizes[self.rule_interest_mod] >= mod_size_threshold[rule_mod] and self.rule_size >= size_threshold[rule_mod] and (self.rule_purities[self.rule_interest_mod] >= purity_threshold[rule_mod]) and (self.rule_z_scores[self.rule_interest_mod] >= z_score_threshold[rule_mod]):\n return True\n else:\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.idx_var, int):\n idx_var_ = [self.idx_var]\n inter_ = [self.inter]\n rule_type_ = [self.rule_type]\n else:\n idx_var_ = list(self.idx_var)\n inter_ = self.inter\n rule_type_ = self.rule_type\n return np.array([idx_var_, inter_, self.rule_size, self.rule_interest_mod, self.rule_mod_sizes[self.rule_interest_mod], self.rule_purities[self.rule_interest_mod], self.rule_z_scores[self.rule_interest_mod], rule_type_])\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ORM_Rule", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ORM_Rule:\n\n def __init__(self, base_purities, idx_var, inter, rule, rule_type):\n \"\"\"Initialize the caracteristics of a rule. Parameters: - basePurities, dict. The purity of the training vector regarding the target. k: class modality and v: the purity for the corresponding class modality. - idx_var, int or list. index of the variable(s) - inter, list or list of list. intervale(s) (i.e., the condition(s) on the feature(s)) - rule, list of tuple. The tuple are under the form (tgt, (vf1, vf2)) where tgt correspond to the target value within the rule and vf1, vf2 correspond to the sample's values (int) for the features within the rule. - rule_type, str or list. The rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the bo\"\"\"\n <|body_0|>\n\n def is_relevant_rule(self, mod_size_threshold, size_threshold, purity_threshold, z_score_threshold):\n \"\"\"Test if a rule is relevant according to rule quality measures threshold. Parameters: - mod_size_threshold, dict of float. The modality size threshold for each one of the modalities. - size_threshold, dict of float. The size threshold for each one of the modalities. - purity_threshold, dict of float. The purity threshold for each one of the modalities. - z_score_threshold, float. The z-score threshold for each one of the modalities. Output: return True if the rule is relevant (i.e., the rule quality measures of the rule pass the threshold), False either.\"\"\"\n <|body_1|>\n\n def get_rule_metadata(self):\n \"\"\"Save the rules caracteristics. Output: return a np.array of shape = [1,8] containing the rules caracteristics in the fellowing order: 1) index of the variable(s), 2) intervals (i.e., the condition(s) on the feature(s)), 3) the size of the rule (i.e., the number of suject in the rule), 4) most representing modality, 5) the rule modality size (i.e., the number of subject regarding the majority class of the rule), 6) the rule purity, 7) the rule z-score and 8) the rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the both ('c','d')).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.idx_var = idx_var\n self.inter = inter\n self.base_purities = base_purities\n self.rule = rule\n self.rule_type = rule_type\n self.rule_mod_sizes = Counter((i[0] for i in self.rule))\n self.rule_size = float(sum(self.rule_mod_sizes.values()))\n self.rule_purities = {k: v / self.rule_size for k, v in self.rule_mod_sizes.iteritems()}\n\n def z_score(rule_size, rule_purity, base_purity):\n \"\"\"\n Compute the z-score of a rule regarding a particular modality.\n\n Parameters:\n - rule_size, int. (i.e., the number of suject in the rule)\n - rule_purity, float. Rule purity.\n - base_purity, float. Base purity.\n\n return z-score\n \"\"\"\n if np.sqrt(base_purity * (1 - base_purity)) == 0:\n return 0\n else:\n return np.sqrt(rule_size) * (rule_purity - base_purity) / np.sqrt(base_purity * (1 - base_purity))\n self.rule_z_scores = {k: z_score(self.rule_size, self.rule_purities[k], self.base_purities[k]) for k, v in self.rule_purities.iteritems()}\n self.rule_interest_mod = key_with_max_val(self.rule_z_scores)\n<|end_body_0|>\n\n<|body_start_1|>\n rule_mod = str(int(self.rule_interest_mod))\n if self.rule_mod_sizes[self.rule_interest_mod] >= mod_size_threshold[rule_mod] and self.rule_size >= size_threshold[rule_mod] and (self.rule_purities[self.rule_interest_mod] >= purity_threshold[rule_mod]) and (self.rule_z_scores[self.rule_interest_mod] >= z_score_threshold[rule_mod]):\n return True\n else:\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.idx_var, int):\n idx_var_ = [self.idx_var]\n inter_ = [self.inter]\n rule_type_ = [self.rule_type]\n else:\n idx_var_ = list(self.idx_var)\n inter_ = self.inter\n rule_type_ = self.rule_type\n return np.array([idx_var_, inter_, self.rule_size, self.rule_interest_mod, self.rule_mod_sizes[self.rule_interest_mod], self.rule_purities[self.rule_interest_mod], self.rule_z_scores[self.rule_interest_mod], rule_type_])\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000458", "length_bytes": 17487, "license_type": "permissive", "methods": [{"docstring": "Initialize the caracteristics of a rule. Parameters: - basePurities, dict. The purity of the training vector regarding the target. k: class modality and v: the purity for the corresponding class modality. - idx_var, int or list. index of the variable(s) - inter, list or list of list. intervale(s) (i.e., the condition(s) on the feature(s)) - rule, list of tuple. The tuple are under the form (tgt, (vf1, vf2)) where tgt correspond to the target value within the rule and vf1, vf2 correspond to the sample's values (int) for the features within the rule. - rule_type, str or list. The rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the bo", "name": "__init__", "signature": "def __init__(self, base_purities, idx_var, inter, rule, rule_type)"}, {"docstring": "Test if a rule is relevant according to rule quality measures threshold. Parameters: - mod_size_threshold, dict of float. The modality size threshold for each one of the modalities. - size_threshold, dict of float. The size threshold for each one of the modalities. - purity_threshold, dict of float. The purity threshold for each one of the modalities. - z_score_threshold, float. The z-score threshold for each one of the modalities. Output: return True if the rule is relevant (i.e., the rule quality measures of the rule pass the threshold), False either.", "name": "is_relevant_rule", "signature": "def is_relevant_rule(self, mod_size_threshold, size_threshold, purity_threshold, z_score_threshold)"}, {"docstring": "Save the rules caracteristics. Output: return a np.array of shape = [1,8] containing the rules caracteristics in the fellowing order: 1) index of the variable(s), 2) intervals (i.e., the condition(s) on the feature(s)), 3) the size of the rule (i.e., the number of suject in the rule), 4) most representing modality, 5) the rule modality size (i.e., the number of subject regarding the majority class of the rule), 6) the rule purity, 7) the rule z-score and 8) the rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the both ('c','d')).", "name": "get_rule_metadata", "signature": "def get_rule_metadata(self)"}], "n_methods": 3, "prompt": "Implement the Python class `ORM_Rule` described below.\n\nClass description:\nImplement the ORM_Rule class.\n\nMethod signatures and docstrings:\n- def __init__(self, base_purities, idx_var, inter, rule, rule_type): Initialize the caracteristics of a rule. Parameters: - basePurities, dict. The purity of the training vector regarding the target. k: class modality and v: the purity for the corresponding class modality. - idx_var, int or list. index of the variable(s) - inter, list or list of list. intervale(s) (i.e., the condition(s) on the feature(s)) - rule, list of tuple. The tuple are under the form (tgt, (vf1, vf2)) where tgt correspond to the target value within the rule and vf1, vf2 correspond to the sample's values (int) for the features within the rule. - rule_type, str or list. The rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the bo\n- def is_relevant_rule(self, mod_size_threshold, size_threshold, purity_threshold, z_score_threshold): Test if a rule is relevant according to rule quality measures threshold. Parameters: - mod_size_threshold, dict of float. The modality size threshold for each one of the modalities. - size_threshold, dict of float. The size threshold for each one of the modalities. - purity_threshold, dict of float. The purity threshold for each one of the modalities. - z_score_threshold, float. The z-score threshold for each one of the modalities. Output: return True if the rule is relevant (i.e., the rule quality measures of the rule pass the threshold), False either.\n- def get_rule_metadata(self): Save the rules caracteristics. Output: return a np.array of shape = [1,8] containing the rules caracteristics in the fellowing order: 1) index of the variable(s), 2) intervals (i.e., the condition(s) on the feature(s)), 3) the size of the rule (i.e., the number of suject in the rule), 4) most representing modality, 5) the rule modality size (i.e., the number of subject regarding the majority class of the rule), 6) the rule purity, 7) the rule z-score and 8) the rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the both ('c','d')).", "prompted_full_text": "Implement the Python class `ORM_Rule` described below.\n\nClass description:\nImplement the ORM_Rule class.\n\nMethod signatures and docstrings:\n- def __init__(self, base_purities, idx_var, inter, rule, rule_type): Initialize the caracteristics of a rule. Parameters: - basePurities, dict. The purity of the training vector regarding the target. k: class modality and v: the purity for the corresponding class modality. - idx_var, int or list. index of the variable(s) - inter, list or list of list. intervale(s) (i.e., the condition(s) on the feature(s)) - rule, list of tuple. The tuple are under the form (tgt, (vf1, vf2)) where tgt correspond to the target value within the rule and vf1, vf2 correspond to the sample's values (int) for the features within the rule. - rule_type, str or list. The rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the bo\n- def is_relevant_rule(self, mod_size_threshold, size_threshold, purity_threshold, z_score_threshold): Test if a rule is relevant according to rule quality measures threshold. Parameters: - mod_size_threshold, dict of float. The modality size threshold for each one of the modalities. - size_threshold, dict of float. The size threshold for each one of the modalities. - purity_threshold, dict of float. The purity threshold for each one of the modalities. - z_score_threshold, float. The z-score threshold for each one of the modalities. Output: return True if the rule is relevant (i.e., the rule quality measures of the rule pass the threshold), False either.\n- def get_rule_metadata(self): Save the rules caracteristics. Output: return a np.array of shape = [1,8] containing the rules caracteristics in the fellowing order: 1) index of the variable(s), 2) intervals (i.e., the condition(s) on the feature(s)), 3) the size of the rule (i.e., the number of suject in the rule), 4) most representing modality, 5) the rule modality size (i.e., the number of subject regarding the majority class of the rule), 6) the rule purity, 7) the rule z-score and 8) the rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the both ('c','d')).\n\n<|skeleton|>\nclass ORM_Rule:\n\n def __init__(self, base_purities, idx_var, inter, rule, rule_type):\n \"\"\"Initialize the caracteristics of a rule. Parameters: - basePurities, dict. The purity of the training vector regarding the target. k: class modality and v: the purity for the corresponding class modality. - idx_var, int or list. index of the variable(s) - inter, list or list of list. intervale(s) (i.e., the condition(s) on the feature(s)) - rule, list of tuple. The tuple are under the form (tgt, (vf1, vf2)) where tgt correspond to the target value within the rule and vf1, vf2 correspond to the sample's values (int) for the features within the rule. - rule_type, str or list. The rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the bo\"\"\"\n <|body_0|>\n\n def is_relevant_rule(self, mod_size_threshold, size_threshold, purity_threshold, z_score_threshold):\n \"\"\"Test if a rule is relevant according to rule quality measures threshold. Parameters: - mod_size_threshold, dict of float. The modality size threshold for each one of the modalities. - size_threshold, dict of float. The size threshold for each one of the modalities. - purity_threshold, dict of float. The purity threshold for each one of the modalities. - z_score_threshold, float. The z-score threshold for each one of the modalities. Output: return True if the rule is relevant (i.e., the rule quality measures of the rule pass the threshold), False either.\"\"\"\n <|body_1|>\n\n def get_rule_metadata(self):\n \"\"\"Save the rules caracteristics. Output: return a np.array of shape = [1,8] containing the rules caracteristics in the fellowing order: 1) index of the variable(s), 2) intervals (i.e., the condition(s) on the feature(s)), 3) the size of the rule (i.e., the number of suject in the rule), 4) most representing modality, 5) the rule modality size (i.e., the number of subject regarding the majority class of the rule), 6) the rule purity, 7) the rule z-score and 8) the rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the both ('c','d')).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.idx_var = idx_var\n self.inter = inter\n self.base_purities = base_purities\n self.rule = rule\n self.rule_type = rule_type\n self.rule_mod_sizes = Counter((i[0] for i in self.rule))\n self.rule_size = float(sum(self.rule_mod_sizes.values()))\n self.rule_purities = {k: v / self.rule_size for k, v in self.rule_mod_sizes.iteritems()}\n\n def z_score(rule_size, rule_purity, base_purity):\n \"\"\"\n Compute the z-score of a rule regarding a particular modality.\n\n Parameters:\n - rule_size, int. (i.e., the number of suject in the rule)\n - rule_purity, float. Rule purity.\n - base_purity, float. Base purity.\n\n return z-score\n \"\"\"\n if np.sqrt(base_purity * (1 - base_purity)) == 0:\n return 0\n else:\n return np.sqrt(rule_size) * (rule_purity - base_purity) / np.sqrt(base_purity * (1 - base_purity))\n self.rule_z_scores = {k: z_score(self.rule_size, self.rule_purities[k], self.base_purities[k]) for k, v in self.rule_purities.iteritems()}\n self.rule_interest_mod = key_with_max_val(self.rule_z_scores)\n<|end_body_0|>\n\n<|body_start_1|>\n rule_mod = str(int(self.rule_interest_mod))\n if self.rule_mod_sizes[self.rule_interest_mod] >= mod_size_threshold[rule_mod] and self.rule_size >= size_threshold[rule_mod] and (self.rule_purities[self.rule_interest_mod] >= purity_threshold[rule_mod]) and (self.rule_z_scores[self.rule_interest_mod] >= z_score_threshold[rule_mod]):\n return True\n else:\n return False\n<|end_body_1|>\n\n<|body_start_2|>\n if isinstance(self.idx_var, int):\n idx_var_ = [self.idx_var]\n inter_ = [self.inter]\n rule_type_ = [self.rule_type]\n else:\n idx_var_ = list(self.idx_var)\n inter_ = self.inter\n rule_type_ = self.rule_type\n return np.array([idx_var_, inter_, self.rule_size, self.rule_interest_mod, self.rule_mod_sizes[self.rule_interest_mod], self.rule_purities[self.rule_interest_mod], self.rule_z_scores[self.rule_interest_mod], rule_type_])\n<|end_body_2|>\n", "revision_id": "ff171f40d2f9cbd129261de104a85b39bebeab1b", "skeleton": "<|skeleton|>\nclass ORM_Rule:\n\n def __init__(self, base_purities, idx_var, inter, rule, rule_type):\n \"\"\"Initialize the caracteristics of a rule. Parameters: - basePurities, dict. The purity of the training vector regarding the target. k: class modality and v: the purity for the corresponding class modality. - idx_var, int or list. index of the variable(s) - inter, list or list of list. intervale(s) (i.e., the condition(s) on the feature(s)) - rule, list of tuple. The tuple are under the form (tgt, (vf1, vf2)) where tgt correspond to the target value within the rule and vf1, vf2 correspond to the sample's values (int) for the features within the rule. - rule_type, str or list. The rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the bo\"\"\"\n <|body_0|>\n\n def is_relevant_rule(self, mod_size_threshold, size_threshold, purity_threshold, z_score_threshold):\n \"\"\"Test if a rule is relevant according to rule quality measures threshold. Parameters: - mod_size_threshold, dict of float. The modality size threshold for each one of the modalities. - size_threshold, dict of float. The size threshold for each one of the modalities. - purity_threshold, dict of float. The purity threshold for each one of the modalities. - z_score_threshold, float. The z-score threshold for each one of the modalities. Output: return True if the rule is relevant (i.e., the rule quality measures of the rule pass the threshold), False either.\"\"\"\n <|body_1|>\n\n def get_rule_metadata(self):\n \"\"\"Save the rules caracteristics. Output: return a np.array of shape = [1,8] containing the rules caracteristics in the fellowing order: 1) index of the variable(s), 2) intervals (i.e., the condition(s) on the feature(s)), 3) the size of the rule (i.e., the number of suject in the rule), 4) most representing modality, 5) the rule modality size (i.e., the number of subject regarding the majority class of the rule), 6) the rule purity, 7) the rule z-score and 8) the rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the both ('c','d')).\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ORM_Rule:\n def __init__(self, base_purities, idx_var, inter, rule, rule_type):\n \"\"\"Initialize the caracteristics of a rule. Parameters: - basePurities, dict. The purity of the training vector regarding the target. k: class modality and v: the purity for the corresponding class modality. - idx_var, int or list. index of the variable(s) - inter, list or list of list. intervale(s) (i.e., the condition(s) on the feature(s)) - rule, list of tuple. The tuple are under the form (tgt, (vf1, vf2)) where tgt correspond to the target value within the rule and vf1, vf2 correspond to the sample's values (int) for the features within the rule. - rule_type, str or list. The rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the bo\"\"\"\n self.idx_var = idx_var\n self.inter = inter\n self.base_purities = base_purities\n self.rule = rule\n self.rule_type = rule_type\n self.rule_mod_sizes = Counter((i[0] for i in self.rule))\n self.rule_size = float(sum(self.rule_mod_sizes.values()))\n self.rule_purities = {k: v / self.rule_size for k, v in self.rule_mod_sizes.iteritems()}\n\n def z_score(rule_size, rule_purity, base_purity):\n \"\"\"\n Compute the z-score of a rule regarding a particular modality.\n\n Parameters:\n - rule_size, int. (i.e., the number of suject in the rule)\n - rule_purity, float. Rule purity.\n - base_purity, float. Base purity.\n\n return z-score\n \"\"\"\n if np.sqrt(base_purity * (1 - base_purity)) == 0:\n return 0\n else:\n return np.sqrt(rule_size) * (rule_purity - base_purity) / np.sqrt(base_purity * (1 - base_purity))\n self.rule_z_scores = {k: z_score(self.rule_size, self.rule_purities[k], self.base_purities[k]) for k, v in self.rule_purities.iteritems()}\n self.rule_interest_mod = key_with_max_val(self.rule_z_scores)\n\n def is_relevant_rule(self, mod_size_threshold, size_threshold, purity_threshold, z_score_threshold):\n \"\"\"Test if a rule is relevant according to rule quality measures threshold. Parameters: - mod_size_threshold, dict of float. The modality size threshold for each one of the modalities. - size_threshold, dict of float. The size threshold for each one of the modalities. - purity_threshold, dict of float. The purity threshold for each one of the modalities. - z_score_threshold, float. The z-score threshold for each one of the modalities. Output: return True if the rule is relevant (i.e., the rule quality measures of the rule pass the threshold), False either.\"\"\"\n rule_mod = str(int(self.rule_interest_mod))\n if self.rule_mod_sizes[self.rule_interest_mod] >= mod_size_threshold[rule_mod] and self.rule_size >= size_threshold[rule_mod] and (self.rule_purities[self.rule_interest_mod] >= purity_threshold[rule_mod]) and (self.rule_z_scores[self.rule_interest_mod] >= z_score_threshold[rule_mod]):\n return True\n else:\n return False\n\n def get_rule_metadata(self):\n \"\"\"Save the rules caracteristics. Output: return a np.array of shape = [1,8] containing the rules caracteristics in the fellowing order: 1) index of the variable(s), 2) intervals (i.e., the condition(s) on the feature(s)), 3) the size of the rule (i.e., the number of suject in the rule), 4) most representing modality, 5) the rule modality size (i.e., the number of subject regarding the majority class of the rule), 6) the rule purity, 7) the rule z-score and 8) the rule type (i.e., if the rule comes from interger ordered feature(s) ('c') or categorical feature(s) ('d') or the both ('c','d')).\"\"\"\n if isinstance(self.idx_var, int):\n idx_var_ = [self.idx_var]\n inter_ = [self.inter]\n rule_type_ = [self.rule_type]\n else:\n idx_var_ = list(self.idx_var)\n inter_ = self.inter\n rule_type_ = self.rule_type\n return np.array([idx_var_, inter_, self.rule_size, self.rule_interest_mod, self.rule_mod_sizes[self.rule_interest_mod], self.rule_purities[self.rule_interest_mod], self.rule_z_scores[self.rule_interest_mod], rule_type_])\n", "source": "the_stack_v2_python_sparse", "source_path": "rule_mining/rm/RMxD.py", "source_repo": "Museau/Rule-Mining", "split": "test", "star_events_count": 3} {"blob_id": "f18e526a20e9af3da5a4004eacf4132048e63de3", "bodies": ["\"\"\"\n 次に盤に置く駒☆(^~^)\n 英語では 手番は your turn, 相手版は your opponent's turn なんで 手番という英語は無い☆(^~^)\n 自分という意味の単語はプログラム用語と被りまくるんで、\n あまり被らない 味方(friend) を手番の意味で たまたま使ってるだけだぜ☆(^~^)\n \"\"\"\nself.friend = Piece.NOUGHT\n'開始局面の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\nself.starting_board = [None] * BOARD_LEN\n'盤の上に最初から駒が何個置いてあったかだぜ☆(^~^)'\nself.starting_pieces_num = 0\n'現状の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\nself.board = [None] * BOARD_LEN\n'棋譜だぜ☆(^~^)駒を置いた番地を並べてけだぜ☆(^~^)'\nself.history = [0] * SQUARES_NUM\n'盤の上に駒が何個置いてあるかだぜ☆(^~^)'\nself.pieces_num = 0", "def cell(index: int):\n \"\"\"\n Returns\n -------\n str\n マスの横幅に合わせた石、または空欄。\n \"\"\"\n if self.board[index] is None:\n return ' '\n else:\n return f' {self.board[index]} '\ns = f'[Next {self.pieces_num + 1} move(s) | Go {self.friend}]\\n\\n'\ns += '+---+---+---+\\n|{0}|{1}|{2}| マスを選んでください。例 `do 7`\\n+---+---+---+\\n|{3}|{4}|{5}| 7 8 9\\n+---+---+---+ 4 5 6\\n|{6}|{7}|{8}| 1 2 3\\n+---+---+---+'.format(cell(7), cell(8), cell(9), cell(4), cell(5), cell(6), cell(1), cell(2), cell(3))\nreturn s", "if result == GameResult.WIN:\n return f'win {winner}'\nelif result == GameResult.DRAW:\n return f'draw'\nelse:\n None"], "bodies_text": "<|body_start_0|>\n \"\"\"\n 次に盤に置く駒☆(^~^)\n 英語では 手番は your turn, 相手版は your opponent's turn なんで 手番という英語は無い☆(^~^)\n 自分という意味の単語はプログラム用語と被りまくるんで、\n あまり被らない 味方(friend) を手番の意味で たまたま使ってるだけだぜ☆(^~^)\n \"\"\"\n self.friend = Piece.NOUGHT\n '開始局面の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.starting_board = [None] * BOARD_LEN\n '盤の上に最初から駒が何個置いてあったかだぜ☆(^~^)'\n self.starting_pieces_num = 0\n '現状の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.board = [None] * BOARD_LEN\n '棋譜だぜ☆(^~^)駒を置いた番地を並べてけだぜ☆(^~^)'\n self.history = [0] * SQUARES_NUM\n '盤の上に駒が何個置いてあるかだぜ☆(^~^)'\n self.pieces_num = 0\n<|end_body_0|>\n\n<|body_start_1|>\n def cell(index: int):\n \"\"\"\n Returns\n -------\n str\n マスの横幅に合わせた石、または空欄。\n \"\"\"\n if self.board[index] is None:\n return ' '\n else:\n return f' {self.board[index]} '\n s = f'[Next {self.pieces_num + 1} move(s) | Go {self.friend}]\\n\\n'\n s += '+---+---+---+\\n|{0}|{1}|{2}| マスを選んでください。例 `do 7`\\n+---+---+---+\\n|{3}|{4}|{5}| 7 8 9\\n+---+---+---+ 4 5 6\\n|{6}|{7}|{8}| 1 2 3\\n+---+---+---+'.format(cell(7), cell(8), cell(9), cell(4), cell(5), cell(6), cell(1), cell(2), cell(3))\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n if result == GameResult.WIN:\n return f'win {winner}'\n elif result == GameResult.DRAW:\n return f'draw'\n else:\n None\n<|end_body_2|>\n", "class_docstring": "局面☆(^~^)ゲームデータをセーブしたり、ロードしたりするときの保存されてる現状だぜ☆(^~^)", "class_name": "Position", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Position:\n \"\"\"局面☆(^~^)ゲームデータをセーブしたり、ロードしたりするときの保存されてる現状だぜ☆(^~^)\"\"\"\n\n def __init__(self):\n \"\"\"規定値☆(^~^)\"\"\"\n <|body_0|>\n\n def pos(self):\n \"\"\"局面を表示するぜ☆(^~^) >>> from look_and_model import Position >>> pos = Position() >>> print(pos.pos()) [Next 1 move(s) | Go o] +---+---+---+ | | | | マスを選んでください。例 `do 7` +---+---+---+ | | | | 7 8 9 +---+---+---+ 4 5 6 | | | | 1 2 3 +---+---+---+ Returns ------- str: 局面\"\"\"\n <|body_1|>\n\n def result(result: 'GameResult', winner: 'Piece'):\n \"\"\"着いていれば勝敗を表示するぜ☆(^~^) 負けが表示されるケースは無い☆(^~^) >>> from look_and_model import Position >>> print(Position.result(GameResult.WIN, Piece.NOUGHT)) win o Returns ------- str: 着いていれば、勝敗\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n 次に盤に置く駒☆(^~^)\n 英語では 手番は your turn, 相手版は your opponent's turn なんで 手番という英語は無い☆(^~^)\n 自分という意味の単語はプログラム用語と被りまくるんで、\n あまり被らない 味方(friend) を手番の意味で たまたま使ってるだけだぜ☆(^~^)\n \"\"\"\n self.friend = Piece.NOUGHT\n '開始局面の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.starting_board = [None] * BOARD_LEN\n '盤の上に最初から駒が何個置いてあったかだぜ☆(^~^)'\n self.starting_pieces_num = 0\n '現状の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.board = [None] * BOARD_LEN\n '棋譜だぜ☆(^~^)駒を置いた番地を並べてけだぜ☆(^~^)'\n self.history = [0] * SQUARES_NUM\n '盤の上に駒が何個置いてあるかだぜ☆(^~^)'\n self.pieces_num = 0\n<|end_body_0|>\n\n<|body_start_1|>\n def cell(index: int):\n \"\"\"\n Returns\n -------\n str\n マスの横幅に合わせた石、または空欄。\n \"\"\"\n if self.board[index] is None:\n return ' '\n else:\n return f' {self.board[index]} '\n s = f'[Next {self.pieces_num + 1} move(s) | Go {self.friend}]\\n\\n'\n s += '+---+---+---+\\n|{0}|{1}|{2}| マスを選んでください。例 `do 7`\\n+---+---+---+\\n|{3}|{4}|{5}| 7 8 9\\n+---+---+---+ 4 5 6\\n|{6}|{7}|{8}| 1 2 3\\n+---+---+---+'.format(cell(7), cell(8), cell(9), cell(4), cell(5), cell(6), cell(1), cell(2), cell(3))\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n if result == GameResult.WIN:\n return f'win {winner}'\n elif result == GameResult.DRAW:\n return f'draw'\n else:\n None\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000459", "length_bytes": 11075, "license_type": "permissive", "methods": [{"docstring": "規定値☆(^~^)", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "局面を表示するぜ☆(^~^) >>> from look_and_model import Position >>> pos = Position() >>> print(pos.pos()) [Next 1 move(s) | Go o] +---+---+---+ | | | | マスを選んでください。例 `do 7` +---+---+---+ | | | | 7 8 9 +---+---+---+ 4 5 6 | | | | 1 2 3 +---+---+---+ Returns ------- str: 局面", "name": "pos", "signature": "def pos(self)"}, {"docstring": "着いていれば勝敗を表示するぜ☆(^~^) 負けが表示されるケースは無い☆(^~^) >>> from look_and_model import Position >>> print(Position.result(GameResult.WIN, Piece.NOUGHT)) win o Returns ------- str: 着いていれば、勝敗", "name": "result", "signature": "def result(result: 'GameResult', winner: 'Piece')"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_002411", "prompt": "Implement the Python class `Position` described below.\n\nClass description:\n局面☆(^~^)ゲームデータをセーブしたり、ロードしたりするときの保存されてる現状だぜ☆(^~^)\n\nMethod signatures and docstrings:\n- def __init__(self): 規定値☆(^~^)\n- def pos(self): 局面を表示するぜ☆(^~^) >>> from look_and_model import Position >>> pos = Position() >>> print(pos.pos()) [Next 1 move(s) | Go o] +---+---+---+ | | | | マスを選んでください。例 `do 7` +---+---+---+ | | | | 7 8 9 +---+---+---+ 4 5 6 | | | | 1 2 3 +---+---+---+ Returns ------- str: 局面\n- def result(result: 'GameResult', winner: 'Piece'): 着いていれば勝敗を表示するぜ☆(^~^) 負けが表示されるケースは無い☆(^~^) >>> from look_and_model import Position >>> print(Position.result(GameResult.WIN, Piece.NOUGHT)) win o Returns ------- str: 着いていれば、勝敗", "prompted_full_text": "Implement the Python class `Position` described below.\n\nClass description:\n局面☆(^~^)ゲームデータをセーブしたり、ロードしたりするときの保存されてる現状だぜ☆(^~^)\n\nMethod signatures and docstrings:\n- def __init__(self): 規定値☆(^~^)\n- def pos(self): 局面を表示するぜ☆(^~^) >>> from look_and_model import Position >>> pos = Position() >>> print(pos.pos()) [Next 1 move(s) | Go o] +---+---+---+ | | | | マスを選んでください。例 `do 7` +---+---+---+ | | | | 7 8 9 +---+---+---+ 4 5 6 | | | | 1 2 3 +---+---+---+ Returns ------- str: 局面\n- def result(result: 'GameResult', winner: 'Piece'): 着いていれば勝敗を表示するぜ☆(^~^) 負けが表示されるケースは無い☆(^~^) >>> from look_and_model import Position >>> print(Position.result(GameResult.WIN, Piece.NOUGHT)) win o Returns ------- str: 着いていれば、勝敗\n\n<|skeleton|>\nclass Position:\n \"\"\"局面☆(^~^)ゲームデータをセーブしたり、ロードしたりするときの保存されてる現状だぜ☆(^~^)\"\"\"\n\n def __init__(self):\n \"\"\"規定値☆(^~^)\"\"\"\n <|body_0|>\n\n def pos(self):\n \"\"\"局面を表示するぜ☆(^~^) >>> from look_and_model import Position >>> pos = Position() >>> print(pos.pos()) [Next 1 move(s) | Go o] +---+---+---+ | | | | マスを選んでください。例 `do 7` +---+---+---+ | | | | 7 8 9 +---+---+---+ 4 5 6 | | | | 1 2 3 +---+---+---+ Returns ------- str: 局面\"\"\"\n <|body_1|>\n\n def result(result: 'GameResult', winner: 'Piece'):\n \"\"\"着いていれば勝敗を表示するぜ☆(^~^) 負けが表示されるケースは無い☆(^~^) >>> from look_and_model import Position >>> print(Position.result(GameResult.WIN, Piece.NOUGHT)) win o Returns ------- str: 着いていれば、勝敗\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n \"\"\"\n 次に盤に置く駒☆(^~^)\n 英語では 手番は your turn, 相手版は your opponent's turn なんで 手番という英語は無い☆(^~^)\n 自分という意味の単語はプログラム用語と被りまくるんで、\n あまり被らない 味方(friend) を手番の意味で たまたま使ってるだけだぜ☆(^~^)\n \"\"\"\n self.friend = Piece.NOUGHT\n '開始局面の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.starting_board = [None] * BOARD_LEN\n '盤の上に最初から駒が何個置いてあったかだぜ☆(^~^)'\n self.starting_pieces_num = 0\n '現状の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.board = [None] * BOARD_LEN\n '棋譜だぜ☆(^~^)駒を置いた番地を並べてけだぜ☆(^~^)'\n self.history = [0] * SQUARES_NUM\n '盤の上に駒が何個置いてあるかだぜ☆(^~^)'\n self.pieces_num = 0\n<|end_body_0|>\n\n<|body_start_1|>\n def cell(index: int):\n \"\"\"\n Returns\n -------\n str\n マスの横幅に合わせた石、または空欄。\n \"\"\"\n if self.board[index] is None:\n return ' '\n else:\n return f' {self.board[index]} '\n s = f'[Next {self.pieces_num + 1} move(s) | Go {self.friend}]\\n\\n'\n s += '+---+---+---+\\n|{0}|{1}|{2}| マスを選んでください。例 `do 7`\\n+---+---+---+\\n|{3}|{4}|{5}| 7 8 9\\n+---+---+---+ 4 5 6\\n|{6}|{7}|{8}| 1 2 3\\n+---+---+---+'.format(cell(7), cell(8), cell(9), cell(4), cell(5), cell(6), cell(1), cell(2), cell(3))\n return s\n<|end_body_1|>\n\n<|body_start_2|>\n if result == GameResult.WIN:\n return f'win {winner}'\n elif result == GameResult.DRAW:\n return f'draw'\n else:\n None\n<|end_body_2|>\n", "revision_id": "4040fdac795210a6722d1bb53859eb0ebd2132d7", "skeleton": "<|skeleton|>\nclass Position:\n \"\"\"局面☆(^~^)ゲームデータをセーブしたり、ロードしたりするときの保存されてる現状だぜ☆(^~^)\"\"\"\n\n def __init__(self):\n \"\"\"規定値☆(^~^)\"\"\"\n <|body_0|>\n\n def pos(self):\n \"\"\"局面を表示するぜ☆(^~^) >>> from look_and_model import Position >>> pos = Position() >>> print(pos.pos()) [Next 1 move(s) | Go o] +---+---+---+ | | | | マスを選んでください。例 `do 7` +---+---+---+ | | | | 7 8 9 +---+---+---+ 4 5 6 | | | | 1 2 3 +---+---+---+ Returns ------- str: 局面\"\"\"\n <|body_1|>\n\n def result(result: 'GameResult', winner: 'Piece'):\n \"\"\"着いていれば勝敗を表示するぜ☆(^~^) 負けが表示されるケースは無い☆(^~^) >>> from look_and_model import Position >>> print(Position.result(GameResult.WIN, Piece.NOUGHT)) win o Returns ------- str: 着いていれば、勝敗\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Position:\n \"\"\"局面☆(^~^)ゲームデータをセーブしたり、ロードしたりするときの保存されてる現状だぜ☆(^~^)\"\"\"\n\n def __init__(self):\n \"\"\"規定値☆(^~^)\"\"\"\n \"\"\"\n 次に盤に置く駒☆(^~^)\n 英語では 手番は your turn, 相手版は your opponent's turn なんで 手番という英語は無い☆(^~^)\n 自分という意味の単語はプログラム用語と被りまくるんで、\n あまり被らない 味方(friend) を手番の意味で たまたま使ってるだけだぜ☆(^~^)\n \"\"\"\n self.friend = Piece.NOUGHT\n '開始局面の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.starting_board = [None] * BOARD_LEN\n '盤の上に最初から駒が何個置いてあったかだぜ☆(^~^)'\n self.starting_pieces_num = 0\n '現状の盤の各マス☆(^~^) [0] は未使用☆(^~^)'\n self.board = [None] * BOARD_LEN\n '棋譜だぜ☆(^~^)駒を置いた番地を並べてけだぜ☆(^~^)'\n self.history = [0] * SQUARES_NUM\n '盤の上に駒が何個置いてあるかだぜ☆(^~^)'\n self.pieces_num = 0\n\n def pos(self):\n \"\"\"局面を表示するぜ☆(^~^) >>> from look_and_model import Position >>> pos = Position() >>> print(pos.pos()) [Next 1 move(s) | Go o] +---+---+---+ | | | | マスを選んでください。例 `do 7` +---+---+---+ | | | | 7 8 9 +---+---+---+ 4 5 6 | | | | 1 2 3 +---+---+---+ Returns ------- str: 局面\"\"\"\n def cell(index: int):\n \"\"\"\n Returns\n -------\n str\n マスの横幅に合わせた石、または空欄。\n \"\"\"\n if self.board[index] is None:\n return ' '\n else:\n return f' {self.board[index]} '\n s = f'[Next {self.pieces_num + 1} move(s) | Go {self.friend}]\\n\\n'\n s += '+---+---+---+\\n|{0}|{1}|{2}| マスを選んでください。例 `do 7`\\n+---+---+---+\\n|{3}|{4}|{5}| 7 8 9\\n+---+---+---+ 4 5 6\\n|{6}|{7}|{8}| 1 2 3\\n+---+---+---+'.format(cell(7), cell(8), cell(9), cell(4), cell(5), cell(6), cell(1), cell(2), cell(3))\n return s\n\n def result(result: 'GameResult', winner: 'Piece'):\n \"\"\"着いていれば勝敗を表示するぜ☆(^~^) 負けが表示されるケースは無い☆(^~^) >>> from look_and_model import Position >>> print(Position.result(GameResult.WIN, Piece.NOUGHT)) win o Returns ------- str: 着いていれば、勝敗\"\"\"\n if result == GameResult.WIN:\n return f'win {winner}'\n elif result == GameResult.DRAW:\n return f'draw'\n else:\n None\n", "source": "the_stack_v2_python_sparse", "source_path": "src/look_and_model.py", "source_repo": "muzudho/tic-tac-toe-on-python", "split": "test", "star_events_count": 1} {"blob_id": "3a0b8de2a2c252a8d35fcf7d19d0ff217926ab2f", "bodies": ["super(DCN, self).__init__()\nself.cate_fea_size = len(cate_fea_uniques)\nself.num_fea_size = num_fea_size\nself.num_layers = num_layers\nself.sparse_embedding = nn.ModuleList([nn.Embedding(voc_size, emb_size) for voc_size in cate_fea_uniques])\nself.cross_layer = Cross_Layer()\nself.all_dims = [self.cate_fea_size * emb_size + self.num_fea_size] + hidden_dims\nfor i in range(1, len(self.all_dims)):\n setattr(self, 'linear_' + str(i), nn.Linear(self.all_dims[i - 1], self.all_dims[i]))\n setattr(self, 'batchNorm_' + str(i), nn.BatchNorm1d(self.all_dims[i]))\n setattr(self, 'activation_' + str(i), nn.ReLU())\n setattr(self, 'dropout_' + str(i), nn.Dropout(dropout[i - 1]))\nself.output = nn.Linear(hidden_dims[-1] + self.cate_fea_size * emb_size + self.num_fea_size, 1)\nself.sigmoid = nn.Sigmoid()", "batch_size = X_sparse.size(0)\nsparse_feature_embed = [emb(X_sparse[:, i].unsqueeze(1)) for i, emb in enumerate(self.sparse_embedding)]\nsparse_feature_embed = torch.cat(sparse_feature_embed, dim=1)\nconcat_sparse_inputs = sparse_feature_embed.view(batch_size, -1)\nembed_input = torch.cat((concat_sparse_inputs, X_dense), dim=-1)\nx1 = x0 = embed_input\nfor i in range(self.num_layers):\n x1 = self.cross_layer(x0, x1)\ncross_layer_output = x1\ndnn_input = embed_input\nfor i in range(1, len(self.all_dims)):\n dnn_input = getattr(self, 'linear_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'batchNorm_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'activation_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'dropout_' + str(i))(dnn_input)\nstack_output = torch.cat((cross_layer_output, dnn_input), dim=-1)\nout = self.output(stack_output)\nout = self.sigmoid(out)\nreturn out"], "bodies_text": "<|body_start_0|>\n super(DCN, self).__init__()\n self.cate_fea_size = len(cate_fea_uniques)\n self.num_fea_size = num_fea_size\n self.num_layers = num_layers\n self.sparse_embedding = nn.ModuleList([nn.Embedding(voc_size, emb_size) for voc_size in cate_fea_uniques])\n self.cross_layer = Cross_Layer()\n self.all_dims = [self.cate_fea_size * emb_size + self.num_fea_size] + hidden_dims\n for i in range(1, len(self.all_dims)):\n setattr(self, 'linear_' + str(i), nn.Linear(self.all_dims[i - 1], self.all_dims[i]))\n setattr(self, 'batchNorm_' + str(i), nn.BatchNorm1d(self.all_dims[i]))\n setattr(self, 'activation_' + str(i), nn.ReLU())\n setattr(self, 'dropout_' + str(i), nn.Dropout(dropout[i - 1]))\n self.output = nn.Linear(hidden_dims[-1] + self.cate_fea_size * emb_size + self.num_fea_size, 1)\n self.sigmoid = nn.Sigmoid()\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = X_sparse.size(0)\n sparse_feature_embed = [emb(X_sparse[:, i].unsqueeze(1)) for i, emb in enumerate(self.sparse_embedding)]\n sparse_feature_embed = torch.cat(sparse_feature_embed, dim=1)\n concat_sparse_inputs = sparse_feature_embed.view(batch_size, -1)\n embed_input = torch.cat((concat_sparse_inputs, X_dense), dim=-1)\n x1 = x0 = embed_input\n for i in range(self.num_layers):\n x1 = self.cross_layer(x0, x1)\n cross_layer_output = x1\n dnn_input = embed_input\n for i in range(1, len(self.all_dims)):\n dnn_input = getattr(self, 'linear_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'batchNorm_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'activation_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'dropout_' + str(i))(dnn_input)\n stack_output = torch.cat((cross_layer_output, dnn_input), dim=-1)\n out = self.output(stack_output)\n out = self.sigmoid(out)\n return out\n<|end_body_1|>\n", "class_docstring": "", "class_name": "DCN", "detected_licenses": ["GPL-1.0-or-later", "Apache-2.0", "BSD-2-Clause", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass DCN:\n\n def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8, hidden_dims=[256, 128], dropout=[0.2, 0.2], num_layer=2):\n \"\"\":param cate_fea_uniques: :param num_fea_size: 数字特征 也就是连续特征 :param emb_size: :param hidden_dims: :param num_classes: :param dropout:\"\"\"\n <|body_0|>\n\n def forward(self, X_sparse, X_dense=None):\n \"\"\"X_sparse: sparse_feature [batch_size, sparse_feature_num] X_dense: dense_feature [batch_size, dense_feature_num]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DCN, self).__init__()\n self.cate_fea_size = len(cate_fea_uniques)\n self.num_fea_size = num_fea_size\n self.num_layers = num_layers\n self.sparse_embedding = nn.ModuleList([nn.Embedding(voc_size, emb_size) for voc_size in cate_fea_uniques])\n self.cross_layer = Cross_Layer()\n self.all_dims = [self.cate_fea_size * emb_size + self.num_fea_size] + hidden_dims\n for i in range(1, len(self.all_dims)):\n setattr(self, 'linear_' + str(i), nn.Linear(self.all_dims[i - 1], self.all_dims[i]))\n setattr(self, 'batchNorm_' + str(i), nn.BatchNorm1d(self.all_dims[i]))\n setattr(self, 'activation_' + str(i), nn.ReLU())\n setattr(self, 'dropout_' + str(i), nn.Dropout(dropout[i - 1]))\n self.output = nn.Linear(hidden_dims[-1] + self.cate_fea_size * emb_size + self.num_fea_size, 1)\n self.sigmoid = nn.Sigmoid()\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = X_sparse.size(0)\n sparse_feature_embed = [emb(X_sparse[:, i].unsqueeze(1)) for i, emb in enumerate(self.sparse_embedding)]\n sparse_feature_embed = torch.cat(sparse_feature_embed, dim=1)\n concat_sparse_inputs = sparse_feature_embed.view(batch_size, -1)\n embed_input = torch.cat((concat_sparse_inputs, X_dense), dim=-1)\n x1 = x0 = embed_input\n for i in range(self.num_layers):\n x1 = self.cross_layer(x0, x1)\n cross_layer_output = x1\n dnn_input = embed_input\n for i in range(1, len(self.all_dims)):\n dnn_input = getattr(self, 'linear_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'batchNorm_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'activation_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'dropout_' + str(i))(dnn_input)\n stack_output = torch.cat((cross_layer_output, dnn_input), dim=-1)\n out = self.output(stack_output)\n out = self.sigmoid(out)\n return out\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000460", "length_bytes": 5894, "license_type": "permissive", "methods": [{"docstring": ":param cate_fea_uniques: :param num_fea_size: 数字特征 也就是连续特征 :param emb_size: :param hidden_dims: :param num_classes: :param dropout:", "name": "__init__", "signature": "def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8, hidden_dims=[256, 128], dropout=[0.2, 0.2], num_layer=2)"}, {"docstring": "X_sparse: sparse_feature [batch_size, sparse_feature_num] X_dense: dense_feature [batch_size, dense_feature_num]", "name": "forward", "signature": "def forward(self, X_sparse, X_dense=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_005695", "prompt": "Implement the Python class `DCN` described below.\n\nClass description:\nImplement the DCN class.\n\nMethod signatures and docstrings:\n- def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8, hidden_dims=[256, 128], dropout=[0.2, 0.2], num_layer=2): :param cate_fea_uniques: :param num_fea_size: 数字特征 也就是连续特征 :param emb_size: :param hidden_dims: :param num_classes: :param dropout:\n- def forward(self, X_sparse, X_dense=None): X_sparse: sparse_feature [batch_size, sparse_feature_num] X_dense: dense_feature [batch_size, dense_feature_num]", "prompted_full_text": "Implement the Python class `DCN` described below.\n\nClass description:\nImplement the DCN class.\n\nMethod signatures and docstrings:\n- def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8, hidden_dims=[256, 128], dropout=[0.2, 0.2], num_layer=2): :param cate_fea_uniques: :param num_fea_size: 数字特征 也就是连续特征 :param emb_size: :param hidden_dims: :param num_classes: :param dropout:\n- def forward(self, X_sparse, X_dense=None): X_sparse: sparse_feature [batch_size, sparse_feature_num] X_dense: dense_feature [batch_size, dense_feature_num]\n\n<|skeleton|>\nclass DCN:\n\n def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8, hidden_dims=[256, 128], dropout=[0.2, 0.2], num_layer=2):\n \"\"\":param cate_fea_uniques: :param num_fea_size: 数字特征 也就是连续特征 :param emb_size: :param hidden_dims: :param num_classes: :param dropout:\"\"\"\n <|body_0|>\n\n def forward(self, X_sparse, X_dense=None):\n \"\"\"X_sparse: sparse_feature [batch_size, sparse_feature_num] X_dense: dense_feature [batch_size, dense_feature_num]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(DCN, self).__init__()\n self.cate_fea_size = len(cate_fea_uniques)\n self.num_fea_size = num_fea_size\n self.num_layers = num_layers\n self.sparse_embedding = nn.ModuleList([nn.Embedding(voc_size, emb_size) for voc_size in cate_fea_uniques])\n self.cross_layer = Cross_Layer()\n self.all_dims = [self.cate_fea_size * emb_size + self.num_fea_size] + hidden_dims\n for i in range(1, len(self.all_dims)):\n setattr(self, 'linear_' + str(i), nn.Linear(self.all_dims[i - 1], self.all_dims[i]))\n setattr(self, 'batchNorm_' + str(i), nn.BatchNorm1d(self.all_dims[i]))\n setattr(self, 'activation_' + str(i), nn.ReLU())\n setattr(self, 'dropout_' + str(i), nn.Dropout(dropout[i - 1]))\n self.output = nn.Linear(hidden_dims[-1] + self.cate_fea_size * emb_size + self.num_fea_size, 1)\n self.sigmoid = nn.Sigmoid()\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = X_sparse.size(0)\n sparse_feature_embed = [emb(X_sparse[:, i].unsqueeze(1)) for i, emb in enumerate(self.sparse_embedding)]\n sparse_feature_embed = torch.cat(sparse_feature_embed, dim=1)\n concat_sparse_inputs = sparse_feature_embed.view(batch_size, -1)\n embed_input = torch.cat((concat_sparse_inputs, X_dense), dim=-1)\n x1 = x0 = embed_input\n for i in range(self.num_layers):\n x1 = self.cross_layer(x0, x1)\n cross_layer_output = x1\n dnn_input = embed_input\n for i in range(1, len(self.all_dims)):\n dnn_input = getattr(self, 'linear_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'batchNorm_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'activation_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'dropout_' + str(i))(dnn_input)\n stack_output = torch.cat((cross_layer_output, dnn_input), dim=-1)\n out = self.output(stack_output)\n out = self.sigmoid(out)\n return out\n<|end_body_1|>\n", "revision_id": "92acc188d3a0f634de58463b6676e70df83ef808", "skeleton": "<|skeleton|>\nclass DCN:\n\n def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8, hidden_dims=[256, 128], dropout=[0.2, 0.2], num_layer=2):\n \"\"\":param cate_fea_uniques: :param num_fea_size: 数字特征 也就是连续特征 :param emb_size: :param hidden_dims: :param num_classes: :param dropout:\"\"\"\n <|body_0|>\n\n def forward(self, X_sparse, X_dense=None):\n \"\"\"X_sparse: sparse_feature [batch_size, sparse_feature_num] X_dense: dense_feature [batch_size, dense_feature_num]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class DCN:\n def __init__(self, cate_fea_uniques, num_fea_size=0, emb_size=8, hidden_dims=[256, 128], dropout=[0.2, 0.2], num_layer=2):\n \"\"\":param cate_fea_uniques: :param num_fea_size: 数字特征 也就是连续特征 :param emb_size: :param hidden_dims: :param num_classes: :param dropout:\"\"\"\n super(DCN, self).__init__()\n self.cate_fea_size = len(cate_fea_uniques)\n self.num_fea_size = num_fea_size\n self.num_layers = num_layers\n self.sparse_embedding = nn.ModuleList([nn.Embedding(voc_size, emb_size) for voc_size in cate_fea_uniques])\n self.cross_layer = Cross_Layer()\n self.all_dims = [self.cate_fea_size * emb_size + self.num_fea_size] + hidden_dims\n for i in range(1, len(self.all_dims)):\n setattr(self, 'linear_' + str(i), nn.Linear(self.all_dims[i - 1], self.all_dims[i]))\n setattr(self, 'batchNorm_' + str(i), nn.BatchNorm1d(self.all_dims[i]))\n setattr(self, 'activation_' + str(i), nn.ReLU())\n setattr(self, 'dropout_' + str(i), nn.Dropout(dropout[i - 1]))\n self.output = nn.Linear(hidden_dims[-1] + self.cate_fea_size * emb_size + self.num_fea_size, 1)\n self.sigmoid = nn.Sigmoid()\n\n def forward(self, X_sparse, X_dense=None):\n \"\"\"X_sparse: sparse_feature [batch_size, sparse_feature_num] X_dense: dense_feature [batch_size, dense_feature_num]\"\"\"\n batch_size = X_sparse.size(0)\n sparse_feature_embed = [emb(X_sparse[:, i].unsqueeze(1)) for i, emb in enumerate(self.sparse_embedding)]\n sparse_feature_embed = torch.cat(sparse_feature_embed, dim=1)\n concat_sparse_inputs = sparse_feature_embed.view(batch_size, -1)\n embed_input = torch.cat((concat_sparse_inputs, X_dense), dim=-1)\n x1 = x0 = embed_input\n for i in range(self.num_layers):\n x1 = self.cross_layer(x0, x1)\n cross_layer_output = x1\n dnn_input = embed_input\n for i in range(1, len(self.all_dims)):\n dnn_input = getattr(self, 'linear_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'batchNorm_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'activation_' + str(i))(dnn_input)\n dnn_input = getattr(self, 'dropout_' + str(i))(dnn_input)\n stack_output = torch.cat((cross_layer_output, dnn_input), dim=-1)\n out = self.output(stack_output)\n out = self.sigmoid(out)\n return out\n", "source": "the_stack_v2_python_sparse", "source_path": "PyTorch/dev/others/Widedeep_ID2866_for_PyTorch/Deep&Cross/model.py", "source_repo": "Ascend/ModelZoo-PyTorch", "split": "test", "star_events_count": 23} {"blob_id": "438982ed06b3dfe85696f774892ac9ab800ee085", "bodies": ["self.res = 0\n\ndef solve(tmps):\n if len(tmps) == 0:\n self.res += 1\n return\n for i in range(min(len(tmps), 2)):\n if int(tmps[:i + 1]) > 0 and int(tmps[:i + 1]) <= 26:\n solve(tmps[i + 1:])\n else:\n return\nsolve(s)\nreturn self.res", "n = len(s)\nif n == 0:\n return 0\ndp = [0 for _ in range(n + 1)]\ndp[n] = 1\nif s[n - 1] == '0':\n dp[n - 1] = 0\nelse:\n dp[n - 1] = 1\nfor i in range(n - 2, -1, -1):\n if int(s[i]) == 0:\n dp[i] = 0\n elif int(s[i:i + 2]) <= 26:\n dp[i] = dp[i + 1] + dp[i + 2]\n else:\n dp[i] = dp[i + 1]\nreturn dp[0]"], "bodies_text": "<|body_start_0|>\n self.res = 0\n\n def solve(tmps):\n if len(tmps) == 0:\n self.res += 1\n return\n for i in range(min(len(tmps), 2)):\n if int(tmps[:i + 1]) > 0 and int(tmps[:i + 1]) <= 26:\n solve(tmps[i + 1:])\n else:\n return\n solve(s)\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n == 0:\n return 0\n dp = [0 for _ in range(n + 1)]\n dp[n] = 1\n if s[n - 1] == '0':\n dp[n - 1] = 0\n else:\n dp[n - 1] = 1\n for i in range(n - 2, -1, -1):\n if int(s[i]) == 0:\n dp[i] = 0\n elif int(s[i:i + 2]) <= 26:\n dp[i] = dp[i + 1] + dp[i + 2]\n else:\n dp[i] = dp[i + 1]\n return dp[0]\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def numDecodings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def numDecodings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.res = 0\n\n def solve(tmps):\n if len(tmps) == 0:\n self.res += 1\n return\n for i in range(min(len(tmps), 2)):\n if int(tmps[:i + 1]) > 0 and int(tmps[:i + 1]) <= 26:\n solve(tmps[i + 1:])\n else:\n return\n solve(s)\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n == 0:\n return 0\n dp = [0 for _ in range(n + 1)]\n dp[n] = 1\n if s[n - 1] == '0':\n dp[n - 1] = 0\n else:\n dp[n - 1] = 1\n for i in range(n - 2, -1, -1):\n if int(s[i]) == 0:\n dp[i] = 0\n elif int(s[i:i + 2]) <= 26:\n dp[i] = dp[i + 1] + dp[i + 2]\n else:\n dp[i] = dp[i + 1]\n return dp[0]\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000461", "length_bytes": 1150, "license_type": "no_license", "methods": [{"docstring": ":type s: str :rtype: int", "name": "numDecodings", "signature": "def numDecodings(self, s)"}, {"docstring": ":type s: str :rtype: int", "name": "numDecodings2", "signature": "def numDecodings2(self, s)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_034671", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numDecodings(self, s): :type s: str :rtype: int\n- def numDecodings2(self, s): :type s: str :rtype: int", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def numDecodings(self, s): :type s: str :rtype: int\n- def numDecodings2(self, s): :type s: str :rtype: int\n\n<|skeleton|>\nclass Solution:\n\n def numDecodings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def numDecodings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.res = 0\n\n def solve(tmps):\n if len(tmps) == 0:\n self.res += 1\n return\n for i in range(min(len(tmps), 2)):\n if int(tmps[:i + 1]) > 0 and int(tmps[:i + 1]) <= 26:\n solve(tmps[i + 1:])\n else:\n return\n solve(s)\n return self.res\n<|end_body_0|>\n\n<|body_start_1|>\n n = len(s)\n if n == 0:\n return 0\n dp = [0 for _ in range(n + 1)]\n dp[n] = 1\n if s[n - 1] == '0':\n dp[n - 1] = 0\n else:\n dp[n - 1] = 1\n for i in range(n - 2, -1, -1):\n if int(s[i]) == 0:\n dp[i] = 0\n elif int(s[i:i + 2]) <= 26:\n dp[i] = dp[i + 1] + dp[i + 2]\n else:\n dp[i] = dp[i + 1]\n return dp[0]\n<|end_body_1|>\n", "revision_id": "a4018931622cb29dea2ba6a202aad0a4873e73d3", "skeleton": "<|skeleton|>\nclass Solution:\n\n def numDecodings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_0|>\n\n def numDecodings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def numDecodings(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n self.res = 0\n\n def solve(tmps):\n if len(tmps) == 0:\n self.res += 1\n return\n for i in range(min(len(tmps), 2)):\n if int(tmps[:i + 1]) > 0 and int(tmps[:i + 1]) <= 26:\n solve(tmps[i + 1:])\n else:\n return\n solve(s)\n return self.res\n\n def numDecodings2(self, s):\n \"\"\":type s: str :rtype: int\"\"\"\n n = len(s)\n if n == 0:\n return 0\n dp = [0 for _ in range(n + 1)]\n dp[n] = 1\n if s[n - 1] == '0':\n dp[n - 1] = 0\n else:\n dp[n - 1] = 1\n for i in range(n - 2, -1, -1):\n if int(s[i]) == 0:\n dp[i] = 0\n elif int(s[i:i + 2]) <= 26:\n dp[i] = dp[i + 1] + dp[i + 2]\n else:\n dp[i] = dp[i + 1]\n return dp[0]\n", "source": "the_stack_v2_python_sparse", "source_path": "0091. 解码方法/main.py", "source_repo": "swbuild1988/leetcode", "split": "test", "star_events_count": 0} {"blob_id": "c211c64f6568d18d2cd296e515ff53f1bfb117ef", "bodies": ["production_multiplier = 1\nstart_years = Incentives._data['start_year']\nif len(start_years[start_years <= vehicle.model_year]) > 0:\n cache_key = max(start_years[start_years <= vehicle.model_year])\n if cache_key in Incentives._data:\n calcs = Incentives._data[cache_key]\n for calc, multiplier in calcs.items():\n select_attribute, select_value = calc.split(':')\n if vehicle.__getattribute__(select_attribute) == select_value:\n production_multiplier = multiplier\nreturn production_multiplier", "Incentives._data.clear()\nif verbose:\n omega_log.logwrite('\\nInitializing database from %s...' % filename)\ninput_template_name = 'production_multipliers'\ninput_template_version = 0.21\ninput_template_columns = {'start_year'}\ntemplate_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\nif not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n df = df.set_index('start_year')\n df = df.drop([c for c in df.columns if 'Unnamed' in c], axis='columns')\n Incentives._data = df.to_dict(orient='index')\n Incentives._data['start_year'] = np.array([*Incentives._data])\nreturn template_errors"], "bodies_text": "<|body_start_0|>\n production_multiplier = 1\n start_years = Incentives._data['start_year']\n if len(start_years[start_years <= vehicle.model_year]) > 0:\n cache_key = max(start_years[start_years <= vehicle.model_year])\n if cache_key in Incentives._data:\n calcs = Incentives._data[cache_key]\n for calc, multiplier in calcs.items():\n select_attribute, select_value = calc.split(':')\n if vehicle.__getattribute__(select_attribute) == select_value:\n production_multiplier = multiplier\n return production_multiplier\n<|end_body_0|>\n\n<|body_start_1|>\n Incentives._data.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing database from %s...' % filename)\n input_template_name = 'production_multipliers'\n input_template_version = 0.21\n input_template_columns = {'start_year'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n df = df.set_index('start_year')\n df = df.drop([c for c in df.columns if 'Unnamed' in c], axis='columns')\n Incentives._data = df.to_dict(orient='index')\n Incentives._data['start_year'] = np.array([*Incentives._data])\n return template_errors\n<|end_body_1|>\n", "class_docstring": "**Loads and provides access to GHG incentives.**", "class_name": "Incentives", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Incentives:\n \"\"\"**Loads and provides access to GHG incentives.**\"\"\"\n\n def get_production_multiplier(vehicle):\n \"\"\"Get production multiplier (if any) for the given vehicle. Args: vehicle (Vehicle): the vehicle to get the multiplier for Returns: The production multiplier, if applicable, or 1.0\"\"\"\n <|body_0|>\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n production_multiplier = 1\n start_years = Incentives._data['start_year']\n if len(start_years[start_years <= vehicle.model_year]) > 0:\n cache_key = max(start_years[start_years <= vehicle.model_year])\n if cache_key in Incentives._data:\n calcs = Incentives._data[cache_key]\n for calc, multiplier in calcs.items():\n select_attribute, select_value = calc.split(':')\n if vehicle.__getattribute__(select_attribute) == select_value:\n production_multiplier = multiplier\n return production_multiplier\n<|end_body_0|>\n\n<|body_start_1|>\n Incentives._data.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing database from %s...' % filename)\n input_template_name = 'production_multipliers'\n input_template_version = 0.21\n input_template_columns = {'start_year'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n df = df.set_index('start_year')\n df = df.drop([c for c in df.columns if 'Unnamed' in c], axis='columns')\n Incentives._data = df.to_dict(orient='index')\n Incentives._data['start_year'] = np.array([*Incentives._data])\n return template_errors\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000462", "length_bytes": 5113, "license_type": "no_license", "methods": [{"docstring": "Get production multiplier (if any) for the given vehicle. Args: vehicle (Vehicle): the vehicle to get the multiplier for Returns: The production multiplier, if applicable, or 1.0", "name": "get_production_multiplier", "signature": "def get_production_multiplier(vehicle)"}, {"docstring": "Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success", "name": "init_from_file", "signature": "def init_from_file(filename, verbose=False)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_045082", "prompt": "Implement the Python class `Incentives` described below.\n\nClass description:\n**Loads and provides access to GHG incentives.**\n\nMethod signatures and docstrings:\n- def get_production_multiplier(vehicle): Get production multiplier (if any) for the given vehicle. Args: vehicle (Vehicle): the vehicle to get the multiplier for Returns: The production multiplier, if applicable, or 1.0\n- def init_from_file(filename, verbose=False): Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success", "prompted_full_text": "Implement the Python class `Incentives` described below.\n\nClass description:\n**Loads and provides access to GHG incentives.**\n\nMethod signatures and docstrings:\n- def get_production_multiplier(vehicle): Get production multiplier (if any) for the given vehicle. Args: vehicle (Vehicle): the vehicle to get the multiplier for Returns: The production multiplier, if applicable, or 1.0\n- def init_from_file(filename, verbose=False): Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\n\n<|skeleton|>\nclass Incentives:\n \"\"\"**Loads and provides access to GHG incentives.**\"\"\"\n\n def get_production_multiplier(vehicle):\n \"\"\"Get production multiplier (if any) for the given vehicle. Args: vehicle (Vehicle): the vehicle to get the multiplier for Returns: The production multiplier, if applicable, or 1.0\"\"\"\n <|body_0|>\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n production_multiplier = 1\n start_years = Incentives._data['start_year']\n if len(start_years[start_years <= vehicle.model_year]) > 0:\n cache_key = max(start_years[start_years <= vehicle.model_year])\n if cache_key in Incentives._data:\n calcs = Incentives._data[cache_key]\n for calc, multiplier in calcs.items():\n select_attribute, select_value = calc.split(':')\n if vehicle.__getattribute__(select_attribute) == select_value:\n production_multiplier = multiplier\n return production_multiplier\n<|end_body_0|>\n\n<|body_start_1|>\n Incentives._data.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing database from %s...' % filename)\n input_template_name = 'production_multipliers'\n input_template_version = 0.21\n input_template_columns = {'start_year'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n df = df.set_index('start_year')\n df = df.drop([c for c in df.columns if 'Unnamed' in c], axis='columns')\n Incentives._data = df.to_dict(orient='index')\n Incentives._data['start_year'] = np.array([*Incentives._data])\n return template_errors\n<|end_body_1|>\n", "revision_id": "afe912c57383b9de90ef30820f7977c3367a30c4", "skeleton": "<|skeleton|>\nclass Incentives:\n \"\"\"**Loads and provides access to GHG incentives.**\"\"\"\n\n def get_production_multiplier(vehicle):\n \"\"\"Get production multiplier (if any) for the given vehicle. Args: vehicle (Vehicle): the vehicle to get the multiplier for Returns: The production multiplier, if applicable, or 1.0\"\"\"\n <|body_0|>\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Incentives:\n \"\"\"**Loads and provides access to GHG incentives.**\"\"\"\n\n def get_production_multiplier(vehicle):\n \"\"\"Get production multiplier (if any) for the given vehicle. Args: vehicle (Vehicle): the vehicle to get the multiplier for Returns: The production multiplier, if applicable, or 1.0\"\"\"\n production_multiplier = 1\n start_years = Incentives._data['start_year']\n if len(start_years[start_years <= vehicle.model_year]) > 0:\n cache_key = max(start_years[start_years <= vehicle.model_year])\n if cache_key in Incentives._data:\n calcs = Incentives._data[cache_key]\n for calc, multiplier in calcs.items():\n select_attribute, select_value = calc.split(':')\n if vehicle.__getattribute__(select_attribute) == select_value:\n production_multiplier = multiplier\n return production_multiplier\n\n def init_from_file(filename, verbose=False):\n \"\"\"Initialize class data from input file. Args: filename (str): name of input file verbose (bool): enable additional console and logfile output if True Returns: List of template/input errors, else empty list on success\"\"\"\n Incentives._data.clear()\n if verbose:\n omega_log.logwrite('\\nInitializing database from %s...' % filename)\n input_template_name = 'production_multipliers'\n input_template_version = 0.21\n input_template_columns = {'start_year'}\n template_errors = validate_template_version_info(filename, input_template_name, input_template_version, verbose=verbose)\n if not template_errors:\n df = pd.read_csv(filename, skiprows=1)\n template_errors = validate_template_column_names(filename, input_template_columns, df.columns, verbose=verbose)\n if not template_errors:\n df = df.set_index('start_year')\n df = df.drop([c for c in df.columns if 'Unnamed' in c], axis='columns')\n Incentives._data = df.to_dict(orient='index')\n Incentives._data['start_year'] = np.array([*Incentives._data])\n return template_errors\n", "source": "the_stack_v2_python_sparse", "source_path": "omega_model/policy/incentives.py", "source_repo": "USEPA/EPA_OMEGA_Model", "split": "test", "star_events_count": 17} {"blob_id": "00f9d0b3e403dd3de6d8da2b9b89bd9121e29e2e", "bodies": ["super().__init__(**kwargs)\nif level_fn is None:\n level_fn = lambda outputs_old, **kwargs: tf.reduce_min(outputs_old)\nself.level_fn = level_fn\nself.temperature = temperature", "if level_fn is None:\n level_fn = self.level_fn\nwith tf.name_scope('closed_form') as scope:\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n resid = levels - means\n stddv = utils.tile_as(tf.sqrt(var), resid)\n upper_bounds = utils.safe_div(resid, stddv)\n probabilities = utils.normal_cdf(upper_bounds)\n return tf.negative(probabilities)", "if level_fn is None:\n level_fn = self.level_fn\nif temperature is None:\n temperature = self.temperature\nif num_fantasies is None:\n num_fantasies = self.num_fantasies\nwith tf.name_scope('monte_carlo') as scope:\n if samples is None:\n samples = self.draw_samples(num_fantasies, means, cov, **kwargs)\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n minima = tf.reduce_min(samples, axis=-1)\n if incumbents is not None:\n minima = tf.minimum(minima, incumbents)\n if temperature is None:\n improved = tf.cast(tf.less(minima, levels), minima.dtype)\n else:\n improved = utils.soft_less(minima, levels, temperature)\n estimate = self.reduce_samples(improved, weights, axis=-1)\n return tf.negative(estimate)"], "bodies_text": "<|body_start_0|>\n super().__init__(**kwargs)\n if level_fn is None:\n level_fn = lambda outputs_old, **kwargs: tf.reduce_min(outputs_old)\n self.level_fn = level_fn\n self.temperature = temperature\n<|end_body_0|>\n\n<|body_start_1|>\n if level_fn is None:\n level_fn = self.level_fn\n with tf.name_scope('closed_form') as scope:\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n resid = levels - means\n stddv = utils.tile_as(tf.sqrt(var), resid)\n upper_bounds = utils.safe_div(resid, stddv)\n probabilities = utils.normal_cdf(upper_bounds)\n return tf.negative(probabilities)\n<|end_body_1|>\n\n<|body_start_2|>\n if level_fn is None:\n level_fn = self.level_fn\n if temperature is None:\n temperature = self.temperature\n if num_fantasies is None:\n num_fantasies = self.num_fantasies\n with tf.name_scope('monte_carlo') as scope:\n if samples is None:\n samples = self.draw_samples(num_fantasies, means, cov, **kwargs)\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n minima = tf.reduce_min(samples, axis=-1)\n if incumbents is not None:\n minima = tf.minimum(minima, incumbents)\n if temperature is None:\n improved = tf.cast(tf.less(minima, levels), minima.dtype)\n else:\n improved = utils.soft_less(minima, levels, temperature)\n estimate = self.reduce_samples(improved, weights, axis=-1)\n return tf.negative(estimate)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "negative_pi", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass negative_pi:\n\n def __init__(self, level_fn=None, temperature=None, **kwargs):\n \"\"\"Arguments level_fn : function returning the level w.r.t. improvement is measured temperature : temperature parameter for soft_less\"\"\"\n <|body_0|>\n\n def _closed_form(self, means, var, levels=None, level_fn=None, **kwargs):\n \"\"\"Closed-form expression for negative, marginal PI.\"\"\"\n <|body_1|>\n\n def _monte_carlo(self, means, cov, levels=None, samples=None, weights=None, num_fantasies=None, temperature=None, incumbents=None, level_fn=None, **kwargs):\n \"\"\"Monte Carlo estimate of negative q-EI.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n if level_fn is None:\n level_fn = lambda outputs_old, **kwargs: tf.reduce_min(outputs_old)\n self.level_fn = level_fn\n self.temperature = temperature\n<|end_body_0|>\n\n<|body_start_1|>\n if level_fn is None:\n level_fn = self.level_fn\n with tf.name_scope('closed_form') as scope:\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n resid = levels - means\n stddv = utils.tile_as(tf.sqrt(var), resid)\n upper_bounds = utils.safe_div(resid, stddv)\n probabilities = utils.normal_cdf(upper_bounds)\n return tf.negative(probabilities)\n<|end_body_1|>\n\n<|body_start_2|>\n if level_fn is None:\n level_fn = self.level_fn\n if temperature is None:\n temperature = self.temperature\n if num_fantasies is None:\n num_fantasies = self.num_fantasies\n with tf.name_scope('monte_carlo') as scope:\n if samples is None:\n samples = self.draw_samples(num_fantasies, means, cov, **kwargs)\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n minima = tf.reduce_min(samples, axis=-1)\n if incumbents is not None:\n minima = tf.minimum(minima, incumbents)\n if temperature is None:\n improved = tf.cast(tf.less(minima, levels), minima.dtype)\n else:\n improved = utils.soft_less(minima, levels, temperature)\n estimate = self.reduce_samples(improved, weights, axis=-1)\n return tf.negative(estimate)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000463", "length_bytes": 2799, "license_type": "no_license", "methods": [{"docstring": "Arguments level_fn : function returning the level w.r.t. improvement is measured temperature : temperature parameter for soft_less", "name": "__init__", "signature": "def __init__(self, level_fn=None, temperature=None, **kwargs)"}, {"docstring": "Closed-form expression for negative, marginal PI.", "name": "_closed_form", "signature": "def _closed_form(self, means, var, levels=None, level_fn=None, **kwargs)"}, {"docstring": "Monte Carlo estimate of negative q-EI.", "name": "_monte_carlo", "signature": "def _monte_carlo(self, means, cov, levels=None, samples=None, weights=None, num_fantasies=None, temperature=None, incumbents=None, level_fn=None, **kwargs)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_047052", "prompt": "Implement the Python class `negative_pi` described below.\n\nClass description:\nImplement the negative_pi class.\n\nMethod signatures and docstrings:\n- def __init__(self, level_fn=None, temperature=None, **kwargs): Arguments level_fn : function returning the level w.r.t. improvement is measured temperature : temperature parameter for soft_less\n- def _closed_form(self, means, var, levels=None, level_fn=None, **kwargs): Closed-form expression for negative, marginal PI.\n- def _monte_carlo(self, means, cov, levels=None, samples=None, weights=None, num_fantasies=None, temperature=None, incumbents=None, level_fn=None, **kwargs): Monte Carlo estimate of negative q-EI.", "prompted_full_text": "Implement the Python class `negative_pi` described below.\n\nClass description:\nImplement the negative_pi class.\n\nMethod signatures and docstrings:\n- def __init__(self, level_fn=None, temperature=None, **kwargs): Arguments level_fn : function returning the level w.r.t. improvement is measured temperature : temperature parameter for soft_less\n- def _closed_form(self, means, var, levels=None, level_fn=None, **kwargs): Closed-form expression for negative, marginal PI.\n- def _monte_carlo(self, means, cov, levels=None, samples=None, weights=None, num_fantasies=None, temperature=None, incumbents=None, level_fn=None, **kwargs): Monte Carlo estimate of negative q-EI.\n\n<|skeleton|>\nclass negative_pi:\n\n def __init__(self, level_fn=None, temperature=None, **kwargs):\n \"\"\"Arguments level_fn : function returning the level w.r.t. improvement is measured temperature : temperature parameter for soft_less\"\"\"\n <|body_0|>\n\n def _closed_form(self, means, var, levels=None, level_fn=None, **kwargs):\n \"\"\"Closed-form expression for negative, marginal PI.\"\"\"\n <|body_1|>\n\n def _monte_carlo(self, means, cov, levels=None, samples=None, weights=None, num_fantasies=None, temperature=None, incumbents=None, level_fn=None, **kwargs):\n \"\"\"Monte Carlo estimate of negative q-EI.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(**kwargs)\n if level_fn is None:\n level_fn = lambda outputs_old, **kwargs: tf.reduce_min(outputs_old)\n self.level_fn = level_fn\n self.temperature = temperature\n<|end_body_0|>\n\n<|body_start_1|>\n if level_fn is None:\n level_fn = self.level_fn\n with tf.name_scope('closed_form') as scope:\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n resid = levels - means\n stddv = utils.tile_as(tf.sqrt(var), resid)\n upper_bounds = utils.safe_div(resid, stddv)\n probabilities = utils.normal_cdf(upper_bounds)\n return tf.negative(probabilities)\n<|end_body_1|>\n\n<|body_start_2|>\n if level_fn is None:\n level_fn = self.level_fn\n if temperature is None:\n temperature = self.temperature\n if num_fantasies is None:\n num_fantasies = self.num_fantasies\n with tf.name_scope('monte_carlo') as scope:\n if samples is None:\n samples = self.draw_samples(num_fantasies, means, cov, **kwargs)\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n minima = tf.reduce_min(samples, axis=-1)\n if incumbents is not None:\n minima = tf.minimum(minima, incumbents)\n if temperature is None:\n improved = tf.cast(tf.less(minima, levels), minima.dtype)\n else:\n improved = utils.soft_less(minima, levels, temperature)\n estimate = self.reduce_samples(improved, weights, axis=-1)\n return tf.negative(estimate)\n<|end_body_2|>\n", "revision_id": "f9243678f8979ea16c7c86513d6b51d061ace239", "skeleton": "<|skeleton|>\nclass negative_pi:\n\n def __init__(self, level_fn=None, temperature=None, **kwargs):\n \"\"\"Arguments level_fn : function returning the level w.r.t. improvement is measured temperature : temperature parameter for soft_less\"\"\"\n <|body_0|>\n\n def _closed_form(self, means, var, levels=None, level_fn=None, **kwargs):\n \"\"\"Closed-form expression for negative, marginal PI.\"\"\"\n <|body_1|>\n\n def _monte_carlo(self, means, cov, levels=None, samples=None, weights=None, num_fantasies=None, temperature=None, incumbents=None, level_fn=None, **kwargs):\n \"\"\"Monte Carlo estimate of negative q-EI.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class negative_pi:\n def __init__(self, level_fn=None, temperature=None, **kwargs):\n \"\"\"Arguments level_fn : function returning the level w.r.t. improvement is measured temperature : temperature parameter for soft_less\"\"\"\n super().__init__(**kwargs)\n if level_fn is None:\n level_fn = lambda outputs_old, **kwargs: tf.reduce_min(outputs_old)\n self.level_fn = level_fn\n self.temperature = temperature\n\n def _closed_form(self, means, var, levels=None, level_fn=None, **kwargs):\n \"\"\"Closed-form expression for negative, marginal PI.\"\"\"\n if level_fn is None:\n level_fn = self.level_fn\n with tf.name_scope('closed_form') as scope:\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n resid = levels - means\n stddv = utils.tile_as(tf.sqrt(var), resid)\n upper_bounds = utils.safe_div(resid, stddv)\n probabilities = utils.normal_cdf(upper_bounds)\n return tf.negative(probabilities)\n\n def _monte_carlo(self, means, cov, levels=None, samples=None, weights=None, num_fantasies=None, temperature=None, incumbents=None, level_fn=None, **kwargs):\n \"\"\"Monte Carlo estimate of negative q-EI.\"\"\"\n if level_fn is None:\n level_fn = self.level_fn\n if temperature is None:\n temperature = self.temperature\n if num_fantasies is None:\n num_fantasies = self.num_fantasies\n with tf.name_scope('monte_carlo') as scope:\n if samples is None:\n samples = self.draw_samples(num_fantasies, means, cov, **kwargs)\n if levels is None:\n levels = level_fn(means=means, **kwargs)\n minima = tf.reduce_min(samples, axis=-1)\n if incumbents is not None:\n minima = tf.minimum(minima, incumbents)\n if temperature is None:\n improved = tf.cast(tf.less(minima, levels), minima.dtype)\n else:\n improved = utils.soft_less(minima, levels, temperature)\n estimate = self.reduce_samples(improved, weights, axis=-1)\n return tf.negative(estimate)\n", "source": "the_stack_v2_python_sparse", "source_path": "src/losses/negative_pi.py", "source_repo": "j-wilson/MaximizingAcquisitionFunctions", "split": "test", "star_events_count": 11} {"blob_id": "a0b2db2c0c5c063fca43e25a63136511efdfab2b", "bodies": ["data = request.get_json()\nsale_attendant = get_jwt_identity()['email']\nname = InputValidator.valid_string(data['name'].strip())\nquantity_to_sell = InputValidator.valid_number(data['quantity'])\npayload = ['name', 'quantity']\nfor item in data.keys():\n if item not in payload:\n return ({'message': f'The field {item} is not a valid field'}, 400)\nif Product.retrieve_product_by_name(name):\n product_on_sale = Product.retrieve_product_by_name(name)\n if quantity_to_sell <= product_on_sale['quantity']:\n total = product_on_sale['price'] * quantity_to_sell\n new_sale_record = Sales(quantity_to_sell, name, sale_attendant, total)\n added_sale_record = new_sale_record.save_record()\n Product.update_product(name, product_on_sale['category'], product_on_sale['price'], product_on_sale['quantity'] - quantity_to_sell, product_on_sale['description'], product_on_sale['prod_id'])\n new_quantity = product_on_sale['quantity'] - quantity_to_sell\n return ({'sale record': added_sale_record, 'message': f\"The quantity of {product_on_sale['name']} has been updated new quantity is {new_quantity}\"}, 200)\n return ({'message': f'The quantity you entered exceeds stoked quantity'}, 400)\nreturn ({'message': f'product {name} does not exist'}, 404)", "user = get_jwt_identity()['email']\nrole = get_jwt_claims()['role']\nsales = Sales.retrieve_sales_by_id(sale_id)\nprint(sales)\nif sales:\n if role == 'admin' or user == sales['sale_attendant']:\n return ({'sale_records': sales, 'message': 'Retrieved successfully'}, 200)\n return ({'message': 'You do not have authorization to access the sale record'}, 401)\nreturn ({'message': f'Sale of ID {sale_id} does not exist'}, 404)"], "bodies_text": "<|body_start_0|>\n data = request.get_json()\n sale_attendant = get_jwt_identity()['email']\n name = InputValidator.valid_string(data['name'].strip())\n quantity_to_sell = InputValidator.valid_number(data['quantity'])\n payload = ['name', 'quantity']\n for item in data.keys():\n if item not in payload:\n return ({'message': f'The field {item} is not a valid field'}, 400)\n if Product.retrieve_product_by_name(name):\n product_on_sale = Product.retrieve_product_by_name(name)\n if quantity_to_sell <= product_on_sale['quantity']:\n total = product_on_sale['price'] * quantity_to_sell\n new_sale_record = Sales(quantity_to_sell, name, sale_attendant, total)\n added_sale_record = new_sale_record.save_record()\n Product.update_product(name, product_on_sale['category'], product_on_sale['price'], product_on_sale['quantity'] - quantity_to_sell, product_on_sale['description'], product_on_sale['prod_id'])\n new_quantity = product_on_sale['quantity'] - quantity_to_sell\n return ({'sale record': added_sale_record, 'message': f\"The quantity of {product_on_sale['name']} has been updated new quantity is {new_quantity}\"}, 200)\n return ({'message': f'The quantity you entered exceeds stoked quantity'}, 400)\n return ({'message': f'product {name} does not exist'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n user = get_jwt_identity()['email']\n role = get_jwt_claims()['role']\n sales = Sales.retrieve_sales_by_id(sale_id)\n print(sales)\n if sales:\n if role == 'admin' or user == sales['sale_attendant']:\n return ({'sale_records': sales, 'message': 'Retrieved successfully'}, 200)\n return ({'message': 'You do not have authorization to access the sale record'}, 401)\n return ({'message': f'Sale of ID {sale_id} does not exist'}, 404)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "SalesRecordEnpoint", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SalesRecordEnpoint:\n\n def post(self):\n \"\"\"Post a sale_record\"\"\"\n <|body_0|>\n\n def get(self, sale_id):\n \"\"\"Retrieve a single sales\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.get_json()\n sale_attendant = get_jwt_identity()['email']\n name = InputValidator.valid_string(data['name'].strip())\n quantity_to_sell = InputValidator.valid_number(data['quantity'])\n payload = ['name', 'quantity']\n for item in data.keys():\n if item not in payload:\n return ({'message': f'The field {item} is not a valid field'}, 400)\n if Product.retrieve_product_by_name(name):\n product_on_sale = Product.retrieve_product_by_name(name)\n if quantity_to_sell <= product_on_sale['quantity']:\n total = product_on_sale['price'] * quantity_to_sell\n new_sale_record = Sales(quantity_to_sell, name, sale_attendant, total)\n added_sale_record = new_sale_record.save_record()\n Product.update_product(name, product_on_sale['category'], product_on_sale['price'], product_on_sale['quantity'] - quantity_to_sell, product_on_sale['description'], product_on_sale['prod_id'])\n new_quantity = product_on_sale['quantity'] - quantity_to_sell\n return ({'sale record': added_sale_record, 'message': f\"The quantity of {product_on_sale['name']} has been updated new quantity is {new_quantity}\"}, 200)\n return ({'message': f'The quantity you entered exceeds stoked quantity'}, 400)\n return ({'message': f'product {name} does not exist'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n user = get_jwt_identity()['email']\n role = get_jwt_claims()['role']\n sales = Sales.retrieve_sales_by_id(sale_id)\n print(sales)\n if sales:\n if role == 'admin' or user == sales['sale_attendant']:\n return ({'sale_records': sales, 'message': 'Retrieved successfully'}, 200)\n return ({'message': 'You do not have authorization to access the sale record'}, 401)\n return ({'message': f'Sale of ID {sale_id} does not exist'}, 404)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000464", "length_bytes": 3516, "license_type": "no_license", "methods": [{"docstring": "Post a sale_record", "name": "post", "signature": "def post(self)"}, {"docstring": "Retrieve a single sales", "name": "get", "signature": "def get(self, sale_id)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_012676", "prompt": "Implement the Python class `SalesRecordEnpoint` described below.\n\nClass description:\nImplement the SalesRecordEnpoint class.\n\nMethod signatures and docstrings:\n- def post(self): Post a sale_record\n- def get(self, sale_id): Retrieve a single sales", "prompted_full_text": "Implement the Python class `SalesRecordEnpoint` described below.\n\nClass description:\nImplement the SalesRecordEnpoint class.\n\nMethod signatures and docstrings:\n- def post(self): Post a sale_record\n- def get(self, sale_id): Retrieve a single sales\n\n<|skeleton|>\nclass SalesRecordEnpoint:\n\n def post(self):\n \"\"\"Post a sale_record\"\"\"\n <|body_0|>\n\n def get(self, sale_id):\n \"\"\"Retrieve a single sales\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n data = request.get_json()\n sale_attendant = get_jwt_identity()['email']\n name = InputValidator.valid_string(data['name'].strip())\n quantity_to_sell = InputValidator.valid_number(data['quantity'])\n payload = ['name', 'quantity']\n for item in data.keys():\n if item not in payload:\n return ({'message': f'The field {item} is not a valid field'}, 400)\n if Product.retrieve_product_by_name(name):\n product_on_sale = Product.retrieve_product_by_name(name)\n if quantity_to_sell <= product_on_sale['quantity']:\n total = product_on_sale['price'] * quantity_to_sell\n new_sale_record = Sales(quantity_to_sell, name, sale_attendant, total)\n added_sale_record = new_sale_record.save_record()\n Product.update_product(name, product_on_sale['category'], product_on_sale['price'], product_on_sale['quantity'] - quantity_to_sell, product_on_sale['description'], product_on_sale['prod_id'])\n new_quantity = product_on_sale['quantity'] - quantity_to_sell\n return ({'sale record': added_sale_record, 'message': f\"The quantity of {product_on_sale['name']} has been updated new quantity is {new_quantity}\"}, 200)\n return ({'message': f'The quantity you entered exceeds stoked quantity'}, 400)\n return ({'message': f'product {name} does not exist'}, 404)\n<|end_body_0|>\n\n<|body_start_1|>\n user = get_jwt_identity()['email']\n role = get_jwt_claims()['role']\n sales = Sales.retrieve_sales_by_id(sale_id)\n print(sales)\n if sales:\n if role == 'admin' or user == sales['sale_attendant']:\n return ({'sale_records': sales, 'message': 'Retrieved successfully'}, 200)\n return ({'message': 'You do not have authorization to access the sale record'}, 401)\n return ({'message': f'Sale of ID {sale_id} does not exist'}, 404)\n<|end_body_1|>\n", "revision_id": "d29dfcb5851e107921d6b1d06bb6ece761e7f2b9", "skeleton": "<|skeleton|>\nclass SalesRecordEnpoint:\n\n def post(self):\n \"\"\"Post a sale_record\"\"\"\n <|body_0|>\n\n def get(self, sale_id):\n \"\"\"Retrieve a single sales\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SalesRecordEnpoint:\n def post(self):\n \"\"\"Post a sale_record\"\"\"\n data = request.get_json()\n sale_attendant = get_jwt_identity()['email']\n name = InputValidator.valid_string(data['name'].strip())\n quantity_to_sell = InputValidator.valid_number(data['quantity'])\n payload = ['name', 'quantity']\n for item in data.keys():\n if item not in payload:\n return ({'message': f'The field {item} is not a valid field'}, 400)\n if Product.retrieve_product_by_name(name):\n product_on_sale = Product.retrieve_product_by_name(name)\n if quantity_to_sell <= product_on_sale['quantity']:\n total = product_on_sale['price'] * quantity_to_sell\n new_sale_record = Sales(quantity_to_sell, name, sale_attendant, total)\n added_sale_record = new_sale_record.save_record()\n Product.update_product(name, product_on_sale['category'], product_on_sale['price'], product_on_sale['quantity'] - quantity_to_sell, product_on_sale['description'], product_on_sale['prod_id'])\n new_quantity = product_on_sale['quantity'] - quantity_to_sell\n return ({'sale record': added_sale_record, 'message': f\"The quantity of {product_on_sale['name']} has been updated new quantity is {new_quantity}\"}, 200)\n return ({'message': f'The quantity you entered exceeds stoked quantity'}, 400)\n return ({'message': f'product {name} does not exist'}, 404)\n\n def get(self, sale_id):\n \"\"\"Retrieve a single sales\"\"\"\n user = get_jwt_identity()['email']\n role = get_jwt_claims()['role']\n sales = Sales.retrieve_sales_by_id(sale_id)\n print(sales)\n if sales:\n if role == 'admin' or user == sales['sale_attendant']:\n return ({'sale_records': sales, 'message': 'Retrieved successfully'}, 200)\n return ({'message': 'You do not have authorization to access the sale record'}, 401)\n return ({'message': f'Sale of ID {sale_id} does not exist'}, 404)\n", "source": "the_stack_v2_python_sparse", "source_path": "app/resources/sales_endpoints.py", "source_repo": "JoshuaKodhe/Store-Manager-Api-V2", "split": "test", "star_events_count": 1} {"blob_id": "e955820313eabe3d29d4c56fc56d7e425b7eeb83", "bodies": ["super().__init__()\nif isinstance(output_size, int):\n output_size = (output_size, output_size)\nassert len(output_size) == 2\nassert isinstance(output_size[0], int) and isinstance(output_size[1], int)\nself.output_size = output_size\nif pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\nelif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\nelif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\nelif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\nelse:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\nmin_level = -math.log2(scales[0])\nmax_level = -math.log2(scales[-1])\nassert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\nself.min_level = int(min_level)\nself.max_level = int(max_level)\nassert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\nassert 0 <= self.min_level and self.min_level <= self.max_level\nself.canonical_level = canonical_level\nassert canonical_box_size > 0\nself.canonical_box_size = canonical_box_size", "num_level_assignments = len(self.level_poolers)\nif not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\nassert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\nassert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\nif len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\npooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\nif num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\nlevel_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\nnum_channels = x[0].shape[1]\noutput_size = self.output_size[0]\noutput = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\nfor level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\nreturn output"], "bodies_text": "<|body_start_0|>\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n<|end_body_0|>\n\n<|body_start_1|>\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n<|end_body_1|>\n", "class_docstring": "Region of interest feature map pooler that supports pooling from one or more feature maps.", "class_name": "ROIPooler", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\"\"\"\n <|body_0|>\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n<|end_body_0|>\n\n<|body_start_1|>\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000465", "length_bytes": 11509, "license_type": "permissive", "methods": [{"docstring": "Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can", "name": "__init__", "signature": "def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4)"}, {"docstring": "Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.", "name": "forward", "signature": "def forward(self, x: List[torch.Tensor], box_lists: List[Boxes])"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_049797", "prompt": "Implement the Python class `ROIPooler` described below.\n\nClass description:\nRegion of interest feature map pooler that supports pooling from one or more feature maps.\n\nMethod signatures and docstrings:\n- def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4): Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\n- def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]): Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.", "prompted_full_text": "Implement the Python class `ROIPooler` described below.\n\nClass description:\nRegion of interest feature map pooler that supports pooling from one or more feature maps.\n\nMethod signatures and docstrings:\n- def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4): Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\n- def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]): Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\n\n<|skeleton|>\nclass ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\"\"\"\n <|body_0|>\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n<|end_body_0|>\n\n<|body_start_1|>\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n<|end_body_1|>\n", "revision_id": "80307d2d5e06f06a8a677cc2653f23a4c56402ac", "skeleton": "<|skeleton|>\nclass ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\"\"\"\n <|body_0|>\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ROIPooler:\n \"\"\"Region of interest feature map pooler that supports pooling from one or more feature maps.\"\"\"\n\n def __init__(self, output_size, scales, sampling_ratio, pooler_type, canonical_box_size=224, canonical_level=4):\n \"\"\"Args: output_size (int, tuple[int] or list[int]): output size of the pooled region, e.g., 14 x 14. If tuple or list is given, the length must be 2. scales (list[float]): The scale for each low-level pooling op relative to the input image. For a feature map with stride s relative to the input image, scale is defined as 1/s. The stride must be power of 2. When there are multiple scales, they must form a pyramid, i.e. they must be a monotically decreasing geometric sequence with a factor of 1/2. sampling_ratio (int): The `sampling_ratio` parameter for the ROIAlign op. pooler_type (string): Name of the type of pooling operation that should be applied. For instance, \"ROIPool\" or \"ROIAlignV2\". can\"\"\"\n super().__init__()\n if isinstance(output_size, int):\n output_size = (output_size, output_size)\n assert len(output_size) == 2\n assert isinstance(output_size[0], int) and isinstance(output_size[1], int)\n self.output_size = output_size\n if pooler_type == 'ROIAlign':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=False) for scale in scales))\n elif pooler_type == 'ROIAlignV2':\n self.level_poolers = nn.ModuleList((ROIAlign(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio, aligned=True) for scale in scales))\n elif pooler_type == 'ROIPool':\n self.level_poolers = nn.ModuleList((RoIPool(output_size, spatial_scale=scale) for scale in scales))\n elif pooler_type == 'ROIAlignRotated':\n self.level_poolers = nn.ModuleList((ROIAlignRotated(output_size, spatial_scale=scale, sampling_ratio=sampling_ratio) for scale in scales))\n else:\n raise ValueError('Unknown pooler type: {}'.format(pooler_type))\n min_level = -math.log2(scales[0])\n max_level = -math.log2(scales[-1])\n assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)), 'Featuremap stride is not power of 2!'\n self.min_level = int(min_level)\n self.max_level = int(max_level)\n assert len(scales) == self.max_level - self.min_level + 1, '[ROIPooler] Sizes of input featuremaps do not form a pyramid!'\n assert 0 <= self.min_level and self.min_level <= self.max_level\n self.canonical_level = canonical_level\n assert canonical_box_size > 0\n self.canonical_box_size = canonical_box_size\n\n def forward(self, x: List[torch.Tensor], box_lists: List[Boxes]):\n \"\"\"Args: x (list[Tensor]): A list of feature maps of NCHW shape, with scales matching those used to construct this module. box_lists (list[Boxes] | list[RotatedBoxes]): A list of N Boxes or N RotatedBoxes, where N is the number of images in the batch. The box coordinates are defined on the original image and will be scaled by the `scales` argument of :class:`ROIPooler`. Returns: Tensor: A tensor of shape (M, C, output_size, output_size) where M is the total number of boxes aggregated over all N batch images and C is the number of channels in `x`.\"\"\"\n num_level_assignments = len(self.level_poolers)\n if not is_fx_tracing():\n torch._assert(isinstance(x, list) and isinstance(box_lists, list), 'Arguments to pooler must be lists')\n assert_fx_safe(len(x) == num_level_assignments, 'unequal value, num_level_assignments={}, but x is list of {} Tensors'.format(num_level_assignments, len(x)))\n assert_fx_safe(len(box_lists) == x[0].size(0), 'unequal value, x[0] batch dim 0 is {}, but box_list has length {}'.format(x[0].size(0), len(box_lists)))\n if len(box_lists) == 0:\n return _create_zeros(None, x[0].shape[1], *self.output_size, x[0])\n pooler_fmt_boxes = convert_boxes_to_pooler_format(box_lists)\n if num_level_assignments == 1:\n return self.level_poolers[0](x[0], pooler_fmt_boxes)\n level_assignments = assign_boxes_to_levels(box_lists, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level)\n num_channels = x[0].shape[1]\n output_size = self.output_size[0]\n output = _create_zeros(pooler_fmt_boxes, num_channels, output_size, output_size, x[0])\n for level, pooler in enumerate(self.level_poolers):\n inds = nonzero_tuple(level_assignments == level)[0]\n pooler_fmt_boxes_level = pooler_fmt_boxes[inds]\n output.index_put_((inds,), pooler(x[level], pooler_fmt_boxes_level))\n return output\n", "source": "the_stack_v2_python_sparse", "source_path": "detectron2/modeling/poolers.py", "source_repo": "facebookresearch/detectron2", "split": "test", "star_events_count": 27469} {"blob_id": "793211123ee95283a6c2ea10f19aea9c6ca1cf29", "bodies": ["noise = np.random.randn(x.size)\nNx = len(x)\nsignal_power = 1 / Nx * np.sum(x * x)\nnoise_power = 1 / Nx * np.sum(noise * noise)\nnoise_variance = signal_power / 10 ** (snr / 10)\nnoise = np.sqrt(noise_variance / noise_power) * noise\ny = x + noise\nreturn (y, noise)", "I = I.reshape(-1, 1)\nIn = In.reshape(-1, 1)\nPs = np.sum((I - np.mean(I)) ** 2)\nPn = np.sum((I - In) ** 2)\nsnr = 10 * np.log10(Ps / Pn)\nreturn snr", "s = s.reshape(-1, 1)\ns = s - np.mean(s)\nsL = len(s)\nif fs != fs1:\n x = librosa.resample(data, fs, fs1)\nelse:\n x = data\nx = x.reshape(-1, 1)\nx = x - np.mean(x)\nxL = len(x)\nif xL >= sL:\n x = x[0:sL]\nelse:\n print('Warning noise length < signal length, padding with zero')\n x = np.concatenate((x, np.zeros(sL - xL)))\nSr = snr\nEs = np.sum(x * x)\nEv = np.sum(s * s)\na = np.sqrt(Ev / Es / 10 ** (Sr / 10))\nnoise = a * x\nsignal = s + noise\nreturn (signal, noise)"], "bodies_text": "<|body_start_0|>\n noise = np.random.randn(x.size)\n Nx = len(x)\n signal_power = 1 / Nx * np.sum(x * x)\n noise_power = 1 / Nx * np.sum(noise * noise)\n noise_variance = signal_power / 10 ** (snr / 10)\n noise = np.sqrt(noise_variance / noise_power) * noise\n y = x + noise\n return (y, noise)\n<|end_body_0|>\n\n<|body_start_1|>\n I = I.reshape(-1, 1)\n In = In.reshape(-1, 1)\n Ps = np.sum((I - np.mean(I)) ** 2)\n Pn = np.sum((I - In) ** 2)\n snr = 10 * np.log10(Ps / Pn)\n return snr\n<|end_body_1|>\n\n<|body_start_2|>\n s = s.reshape(-1, 1)\n s = s - np.mean(s)\n sL = len(s)\n if fs != fs1:\n x = librosa.resample(data, fs, fs1)\n else:\n x = data\n x = x.reshape(-1, 1)\n x = x - np.mean(x)\n xL = len(x)\n if xL >= sL:\n x = x[0:sL]\n else:\n print('Warning noise length < signal length, padding with zero')\n x = np.concatenate((x, np.zeros(sL - xL)))\n Sr = snr\n Es = np.sum(x * x)\n Ev = np.sum(s * s)\n a = np.sqrt(Ev / Es / 10 ** (Sr / 10))\n noise = a * x\n signal = s + noise\n return (signal, noise)\n<|end_body_2|>\n", "class_docstring": "", "class_name": "Noisy", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Noisy:\n\n def Gnoisegen(self, x, snr):\n \"\"\"Generate Gaussian white noise according to the set SNR, return noisy speech :param x: clean speech signal :param snr: SNR [dB] :return y: noisy speech :return noise: gaussian white noise\"\"\"\n <|body_0|>\n\n def SNR_singlech(self, I, In):\n \"\"\"calculate SNR of noisy speech signal :param I: clean speech siganl :param In: noisy speech siganl :return snr:\"\"\"\n <|body_1|>\n\n def add_noisedata(self, s, data, fs, fs1, snr):\n \"\"\"把任意的噪声数据按设定的信噪比叠加在纯净信号上,构成带噪语音 :param s: clean speech signal :param data: arbitrary noise data :param fs: clean signal sample frequency :param fs1: data sample frequency :param snr: SNR [dB] :return noise: noise scaled by the set SNR :return signal: noisy (size: n * 1)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n noise = np.random.randn(x.size)\n Nx = len(x)\n signal_power = 1 / Nx * np.sum(x * x)\n noise_power = 1 / Nx * np.sum(noise * noise)\n noise_variance = signal_power / 10 ** (snr / 10)\n noise = np.sqrt(noise_variance / noise_power) * noise\n y = x + noise\n return (y, noise)\n<|end_body_0|>\n\n<|body_start_1|>\n I = I.reshape(-1, 1)\n In = In.reshape(-1, 1)\n Ps = np.sum((I - np.mean(I)) ** 2)\n Pn = np.sum((I - In) ** 2)\n snr = 10 * np.log10(Ps / Pn)\n return snr\n<|end_body_1|>\n\n<|body_start_2|>\n s = s.reshape(-1, 1)\n s = s - np.mean(s)\n sL = len(s)\n if fs != fs1:\n x = librosa.resample(data, fs, fs1)\n else:\n x = data\n x = x.reshape(-1, 1)\n x = x - np.mean(x)\n xL = len(x)\n if xL >= sL:\n x = x[0:sL]\n else:\n print('Warning noise length < signal length, padding with zero')\n x = np.concatenate((x, np.zeros(sL - xL)))\n Sr = snr\n Es = np.sum(x * x)\n Ev = np.sum(s * s)\n a = np.sqrt(Ev / Es / 10 ** (Sr / 10))\n noise = a * x\n signal = s + noise\n return (signal, noise)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000466", "length_bytes": 1914, "license_type": "permissive", "methods": [{"docstring": "Generate Gaussian white noise according to the set SNR, return noisy speech :param x: clean speech signal :param snr: SNR [dB] :return y: noisy speech :return noise: gaussian white noise", "name": "Gnoisegen", "signature": "def Gnoisegen(self, x, snr)"}, {"docstring": "calculate SNR of noisy speech signal :param I: clean speech siganl :param In: noisy speech siganl :return snr:", "name": "SNR_singlech", "signature": "def SNR_singlech(self, I, In)"}, {"docstring": "把任意的噪声数据按设定的信噪比叠加在纯净信号上,构成带噪语音 :param s: clean speech signal :param data: arbitrary noise data :param fs: clean signal sample frequency :param fs1: data sample frequency :param snr: SNR [dB] :return noise: noise scaled by the set SNR :return signal: noisy (size: n * 1)", "name": "add_noisedata", "signature": "def add_noisedata(self, s, data, fs, fs1, snr)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_012156", "prompt": "Implement the Python class `Noisy` described below.\n\nClass description:\nImplement the Noisy class.\n\nMethod signatures and docstrings:\n- def Gnoisegen(self, x, snr): Generate Gaussian white noise according to the set SNR, return noisy speech :param x: clean speech signal :param snr: SNR [dB] :return y: noisy speech :return noise: gaussian white noise\n- def SNR_singlech(self, I, In): calculate SNR of noisy speech signal :param I: clean speech siganl :param In: noisy speech siganl :return snr:\n- def add_noisedata(self, s, data, fs, fs1, snr): 把任意的噪声数据按设定的信噪比叠加在纯净信号上,构成带噪语音 :param s: clean speech signal :param data: arbitrary noise data :param fs: clean signal sample frequency :param fs1: data sample frequency :param snr: SNR [dB] :return noise: noise scaled by the set SNR :return signal: noisy (size: n * 1)", "prompted_full_text": "Implement the Python class `Noisy` described below.\n\nClass description:\nImplement the Noisy class.\n\nMethod signatures and docstrings:\n- def Gnoisegen(self, x, snr): Generate Gaussian white noise according to the set SNR, return noisy speech :param x: clean speech signal :param snr: SNR [dB] :return y: noisy speech :return noise: gaussian white noise\n- def SNR_singlech(self, I, In): calculate SNR of noisy speech signal :param I: clean speech siganl :param In: noisy speech siganl :return snr:\n- def add_noisedata(self, s, data, fs, fs1, snr): 把任意的噪声数据按设定的信噪比叠加在纯净信号上,构成带噪语音 :param s: clean speech signal :param data: arbitrary noise data :param fs: clean signal sample frequency :param fs1: data sample frequency :param snr: SNR [dB] :return noise: noise scaled by the set SNR :return signal: noisy (size: n * 1)\n\n<|skeleton|>\nclass Noisy:\n\n def Gnoisegen(self, x, snr):\n \"\"\"Generate Gaussian white noise according to the set SNR, return noisy speech :param x: clean speech signal :param snr: SNR [dB] :return y: noisy speech :return noise: gaussian white noise\"\"\"\n <|body_0|>\n\n def SNR_singlech(self, I, In):\n \"\"\"calculate SNR of noisy speech signal :param I: clean speech siganl :param In: noisy speech siganl :return snr:\"\"\"\n <|body_1|>\n\n def add_noisedata(self, s, data, fs, fs1, snr):\n \"\"\"把任意的噪声数据按设定的信噪比叠加在纯净信号上,构成带噪语音 :param s: clean speech signal :param data: arbitrary noise data :param fs: clean signal sample frequency :param fs1: data sample frequency :param snr: SNR [dB] :return noise: noise scaled by the set SNR :return signal: noisy (size: n * 1)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n noise = np.random.randn(x.size)\n Nx = len(x)\n signal_power = 1 / Nx * np.sum(x * x)\n noise_power = 1 / Nx * np.sum(noise * noise)\n noise_variance = signal_power / 10 ** (snr / 10)\n noise = np.sqrt(noise_variance / noise_power) * noise\n y = x + noise\n return (y, noise)\n<|end_body_0|>\n\n<|body_start_1|>\n I = I.reshape(-1, 1)\n In = In.reshape(-1, 1)\n Ps = np.sum((I - np.mean(I)) ** 2)\n Pn = np.sum((I - In) ** 2)\n snr = 10 * np.log10(Ps / Pn)\n return snr\n<|end_body_1|>\n\n<|body_start_2|>\n s = s.reshape(-1, 1)\n s = s - np.mean(s)\n sL = len(s)\n if fs != fs1:\n x = librosa.resample(data, fs, fs1)\n else:\n x = data\n x = x.reshape(-1, 1)\n x = x - np.mean(x)\n xL = len(x)\n if xL >= sL:\n x = x[0:sL]\n else:\n print('Warning noise length < signal length, padding with zero')\n x = np.concatenate((x, np.zeros(sL - xL)))\n Sr = snr\n Es = np.sum(x * x)\n Ev = np.sum(s * s)\n a = np.sqrt(Ev / Es / 10 ** (Sr / 10))\n noise = a * x\n signal = s + noise\n return (signal, noise)\n<|end_body_2|>\n", "revision_id": "0074ad1d519387a75d5eca42c77f4d6966eb0a0e", "skeleton": "<|skeleton|>\nclass Noisy:\n\n def Gnoisegen(self, x, snr):\n \"\"\"Generate Gaussian white noise according to the set SNR, return noisy speech :param x: clean speech signal :param snr: SNR [dB] :return y: noisy speech :return noise: gaussian white noise\"\"\"\n <|body_0|>\n\n def SNR_singlech(self, I, In):\n \"\"\"calculate SNR of noisy speech signal :param I: clean speech siganl :param In: noisy speech siganl :return snr:\"\"\"\n <|body_1|>\n\n def add_noisedata(self, s, data, fs, fs1, snr):\n \"\"\"把任意的噪声数据按设定的信噪比叠加在纯净信号上,构成带噪语音 :param s: clean speech signal :param data: arbitrary noise data :param fs: clean signal sample frequency :param fs1: data sample frequency :param snr: SNR [dB] :return noise: noise scaled by the set SNR :return signal: noisy (size: n * 1)\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Noisy:\n def Gnoisegen(self, x, snr):\n \"\"\"Generate Gaussian white noise according to the set SNR, return noisy speech :param x: clean speech signal :param snr: SNR [dB] :return y: noisy speech :return noise: gaussian white noise\"\"\"\n noise = np.random.randn(x.size)\n Nx = len(x)\n signal_power = 1 / Nx * np.sum(x * x)\n noise_power = 1 / Nx * np.sum(noise * noise)\n noise_variance = signal_power / 10 ** (snr / 10)\n noise = np.sqrt(noise_variance / noise_power) * noise\n y = x + noise\n return (y, noise)\n\n def SNR_singlech(self, I, In):\n \"\"\"calculate SNR of noisy speech signal :param I: clean speech siganl :param In: noisy speech siganl :return snr:\"\"\"\n I = I.reshape(-1, 1)\n In = In.reshape(-1, 1)\n Ps = np.sum((I - np.mean(I)) ** 2)\n Pn = np.sum((I - In) ** 2)\n snr = 10 * np.log10(Ps / Pn)\n return snr\n\n def add_noisedata(self, s, data, fs, fs1, snr):\n \"\"\"把任意的噪声数据按设定的信噪比叠加在纯净信号上,构成带噪语音 :param s: clean speech signal :param data: arbitrary noise data :param fs: clean signal sample frequency :param fs1: data sample frequency :param snr: SNR [dB] :return noise: noise scaled by the set SNR :return signal: noisy (size: n * 1)\"\"\"\n s = s.reshape(-1, 1)\n s = s - np.mean(s)\n sL = len(s)\n if fs != fs1:\n x = librosa.resample(data, fs, fs1)\n else:\n x = data\n x = x.reshape(-1, 1)\n x = x - np.mean(x)\n xL = len(x)\n if xL >= sL:\n x = x[0:sL]\n else:\n print('Warning noise length < signal length, padding with zero')\n x = np.concatenate((x, np.zeros(sL - xL)))\n Sr = snr\n Es = np.sum(x * x)\n Ev = np.sum(s * s)\n a = np.sqrt(Ev / Es / 10 ** (Sr / 10))\n noise = a * x\n signal = s + noise\n return (signal, noise)\n", "source": "the_stack_v2_python_sparse", "source_path": "Chapter6_VoiceActivityDetection/Noisy.py", "source_repo": "BarryZM/Python_Speech_SZY", "split": "test", "star_events_count": 0} {"blob_id": "f3796b5ce9e7e3df872886bef92f085c57425ddc", "bodies": ["if self.action == 'retrieve':\n permission_classes = [IsAuthenticated]\nelse:\n permission_classes = [IsAdminUser]\nreturn [permission() for permission in permission_classes]", "if pk == 'i':\n return response.Response(UserSerializer(request.user, context={'request': request}).data)\nreturn super(UserViewSet, self).retrieve(request, pk)"], "bodies_text": "<|body_start_0|>\n if self.action == 'retrieve':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n<|end_body_0|>\n\n<|body_start_1|>\n if pk == 'i':\n return response.Response(UserSerializer(request.user, context={'request': request}).data)\n return super(UserViewSet, self).retrieve(request, pk)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "UserViewSet", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass UserViewSet:\n\n def get_permissions(self):\n \"\"\"Instantiates and returns the list of permissions that this view requires.\"\"\"\n <|body_0|>\n\n def retrieve(self, request, pk=None):\n \"\"\"este metodo serve para retornar informacoes do usuario logado e so retorna informacao se o id passado por 'i'\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.action == 'retrieve':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n<|end_body_0|>\n\n<|body_start_1|>\n if pk == 'i':\n return response.Response(UserSerializer(request.user, context={'request': request}).data)\n return super(UserViewSet, self).retrieve(request, pk)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000467", "length_bytes": 42051, "license_type": "permissive", "methods": [{"docstring": "Instantiates and returns the list of permissions that this view requires.", "name": "get_permissions", "signature": "def get_permissions(self)"}, {"docstring": "este metodo serve para retornar informacoes do usuario logado e so retorna informacao se o id passado por 'i'", "name": "retrieve", "signature": "def retrieve(self, request, pk=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013396", "prompt": "Implement the Python class `UserViewSet` described below.\n\nClass description:\nImplement the UserViewSet class.\n\nMethod signatures and docstrings:\n- def get_permissions(self): Instantiates and returns the list of permissions that this view requires.\n- def retrieve(self, request, pk=None): este metodo serve para retornar informacoes do usuario logado e so retorna informacao se o id passado por 'i'", "prompted_full_text": "Implement the Python class `UserViewSet` described below.\n\nClass description:\nImplement the UserViewSet class.\n\nMethod signatures and docstrings:\n- def get_permissions(self): Instantiates and returns the list of permissions that this view requires.\n- def retrieve(self, request, pk=None): este metodo serve para retornar informacoes do usuario logado e so retorna informacao se o id passado por 'i'\n\n<|skeleton|>\nclass UserViewSet:\n\n def get_permissions(self):\n \"\"\"Instantiates and returns the list of permissions that this view requires.\"\"\"\n <|body_0|>\n\n def retrieve(self, request, pk=None):\n \"\"\"este metodo serve para retornar informacoes do usuario logado e so retorna informacao se o id passado por 'i'\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if self.action == 'retrieve':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n<|end_body_0|>\n\n<|body_start_1|>\n if pk == 'i':\n return response.Response(UserSerializer(request.user, context={'request': request}).data)\n return super(UserViewSet, self).retrieve(request, pk)\n<|end_body_1|>\n", "revision_id": "54c63d84c81cc3d2ca12485f932f12b46d0603e1", "skeleton": "<|skeleton|>\nclass UserViewSet:\n\n def get_permissions(self):\n \"\"\"Instantiates and returns the list of permissions that this view requires.\"\"\"\n <|body_0|>\n\n def retrieve(self, request, pk=None):\n \"\"\"este metodo serve para retornar informacoes do usuario logado e so retorna informacao se o id passado por 'i'\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class UserViewSet:\n def get_permissions(self):\n \"\"\"Instantiates and returns the list of permissions that this view requires.\"\"\"\n if self.action == 'retrieve':\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]\n\n def retrieve(self, request, pk=None):\n \"\"\"este metodo serve para retornar informacoes do usuario logado e so retorna informacao se o id passado por 'i'\"\"\"\n if pk == 'i':\n return response.Response(UserSerializer(request.user, context={'request': request}).data)\n return super(UserViewSet, self).retrieve(request, pk)\n", "source": "the_stack_v2_python_sparse", "source_path": "core_admin/tno/views.py", "source_repo": "linea-it/tno", "split": "test", "star_events_count": 1} {"blob_id": "6f51020008991ef96d863e753dbf3ac6bee12ddf", "bodies": ["self.dlm = AwsClient().connect('dlm', region_name)\ntry:\n self.dlm.get_lifecycle_policies()\nexcept EndpointConnectionError:\n print('Dlm resource is not available in this aws region')\n return", "for policy in self.list_policy(older_than_seconds):\n try:\n self.dlm.delete_lifecycle_policy(PolicyId=policy)\n print('Nuke dlm Lifecycle Policy {0}'.format(policy))\n except ClientError as exc:\n nuke_exceptions('dlm policy', policy, exc)", "response = self.dlm.get_lifecycle_policies()\nfor policy in response['Policies']:\n detailed = self.dlm.get_lifecycle_policy(PolicyId=policy['PolicyId'])\n if detailed['Policy']['DateCreated'].timestamp() < time_delete:\n yield policy['PolicyId']"], "bodies_text": "<|body_start_0|>\n self.dlm = AwsClient().connect('dlm', region_name)\n try:\n self.dlm.get_lifecycle_policies()\n except EndpointConnectionError:\n print('Dlm resource is not available in this aws region')\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for policy in self.list_policy(older_than_seconds):\n try:\n self.dlm.delete_lifecycle_policy(PolicyId=policy)\n print('Nuke dlm Lifecycle Policy {0}'.format(policy))\n except ClientError as exc:\n nuke_exceptions('dlm policy', policy, exc)\n<|end_body_1|>\n\n<|body_start_2|>\n response = self.dlm.get_lifecycle_policies()\n for policy in response['Policies']:\n detailed = self.dlm.get_lifecycle_policy(PolicyId=policy['PolicyId'])\n if detailed['Policy']['DateCreated'].timestamp() < time_delete:\n yield policy['PolicyId']\n<|end_body_2|>\n", "class_docstring": "Abstract dlm nuke in a class.", "class_name": "NukeDlm", "detected_licenses": ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NukeDlm:\n \"\"\"Abstract dlm nuke in a class.\"\"\"\n\n def __init__(self, region_name=None) -> None:\n \"\"\"Initialize dlm nuke.\"\"\"\n <|body_0|>\n\n def nuke(self, older_than_seconds: float) -> None:\n \"\"\"Dlm policies deleting function. Deleting all dlm policy resources with a timestamp greater than older_than_seconds. :param int older_than_seconds: The timestamp in seconds used from which the aws resource will be deleted\"\"\"\n <|body_1|>\n\n def list_policy(self, time_delete: float) -> Iterator[str]:\n \"\"\"Data Lifecycle Policies list function. Returns the IDs of all Data Lifecycle Policies with a timestamp lower than time_delete. :param int time_delete: Timestamp in seconds used for filter Data Lifecycle policies :yield Iterator[str]: Data Lifecycle policies IDs\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dlm = AwsClient().connect('dlm', region_name)\n try:\n self.dlm.get_lifecycle_policies()\n except EndpointConnectionError:\n print('Dlm resource is not available in this aws region')\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for policy in self.list_policy(older_than_seconds):\n try:\n self.dlm.delete_lifecycle_policy(PolicyId=policy)\n print('Nuke dlm Lifecycle Policy {0}'.format(policy))\n except ClientError as exc:\n nuke_exceptions('dlm policy', policy, exc)\n<|end_body_1|>\n\n<|body_start_2|>\n response = self.dlm.get_lifecycle_policies()\n for policy in response['Policies']:\n detailed = self.dlm.get_lifecycle_policy(PolicyId=policy['PolicyId'])\n if detailed['Policy']['DateCreated'].timestamp() < time_delete:\n yield policy['PolicyId']\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000468", "length_bytes": 2056, "license_type": "permissive", "methods": [{"docstring": "Initialize dlm nuke.", "name": "__init__", "signature": "def __init__(self, region_name=None) -> None"}, {"docstring": "Dlm policies deleting function. Deleting all dlm policy resources with a timestamp greater than older_than_seconds. :param int older_than_seconds: The timestamp in seconds used from which the aws resource will be deleted", "name": "nuke", "signature": "def nuke(self, older_than_seconds: float) -> None"}, {"docstring": "Data Lifecycle Policies list function. Returns the IDs of all Data Lifecycle Policies with a timestamp lower than time_delete. :param int time_delete: Timestamp in seconds used for filter Data Lifecycle policies :yield Iterator[str]: Data Lifecycle policies IDs", "name": "list_policy", "signature": "def list_policy(self, time_delete: float) -> Iterator[str]"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_044118", "prompt": "Implement the Python class `NukeDlm` described below.\n\nClass description:\nAbstract dlm nuke in a class.\n\nMethod signatures and docstrings:\n- def __init__(self, region_name=None) -> None: Initialize dlm nuke.\n- def nuke(self, older_than_seconds: float) -> None: Dlm policies deleting function. Deleting all dlm policy resources with a timestamp greater than older_than_seconds. :param int older_than_seconds: The timestamp in seconds used from which the aws resource will be deleted\n- def list_policy(self, time_delete: float) -> Iterator[str]: Data Lifecycle Policies list function. Returns the IDs of all Data Lifecycle Policies with a timestamp lower than time_delete. :param int time_delete: Timestamp in seconds used for filter Data Lifecycle policies :yield Iterator[str]: Data Lifecycle policies IDs", "prompted_full_text": "Implement the Python class `NukeDlm` described below.\n\nClass description:\nAbstract dlm nuke in a class.\n\nMethod signatures and docstrings:\n- def __init__(self, region_name=None) -> None: Initialize dlm nuke.\n- def nuke(self, older_than_seconds: float) -> None: Dlm policies deleting function. Deleting all dlm policy resources with a timestamp greater than older_than_seconds. :param int older_than_seconds: The timestamp in seconds used from which the aws resource will be deleted\n- def list_policy(self, time_delete: float) -> Iterator[str]: Data Lifecycle Policies list function. Returns the IDs of all Data Lifecycle Policies with a timestamp lower than time_delete. :param int time_delete: Timestamp in seconds used for filter Data Lifecycle policies :yield Iterator[str]: Data Lifecycle policies IDs\n\n<|skeleton|>\nclass NukeDlm:\n \"\"\"Abstract dlm nuke in a class.\"\"\"\n\n def __init__(self, region_name=None) -> None:\n \"\"\"Initialize dlm nuke.\"\"\"\n <|body_0|>\n\n def nuke(self, older_than_seconds: float) -> None:\n \"\"\"Dlm policies deleting function. Deleting all dlm policy resources with a timestamp greater than older_than_seconds. :param int older_than_seconds: The timestamp in seconds used from which the aws resource will be deleted\"\"\"\n <|body_1|>\n\n def list_policy(self, time_delete: float) -> Iterator[str]:\n \"\"\"Data Lifecycle Policies list function. Returns the IDs of all Data Lifecycle Policies with a timestamp lower than time_delete. :param int time_delete: Timestamp in seconds used for filter Data Lifecycle policies :yield Iterator[str]: Data Lifecycle policies IDs\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.dlm = AwsClient().connect('dlm', region_name)\n try:\n self.dlm.get_lifecycle_policies()\n except EndpointConnectionError:\n print('Dlm resource is not available in this aws region')\n return\n<|end_body_0|>\n\n<|body_start_1|>\n for policy in self.list_policy(older_than_seconds):\n try:\n self.dlm.delete_lifecycle_policy(PolicyId=policy)\n print('Nuke dlm Lifecycle Policy {0}'.format(policy))\n except ClientError as exc:\n nuke_exceptions('dlm policy', policy, exc)\n<|end_body_1|>\n\n<|body_start_2|>\n response = self.dlm.get_lifecycle_policies()\n for policy in response['Policies']:\n detailed = self.dlm.get_lifecycle_policy(PolicyId=policy['PolicyId'])\n if detailed['Policy']['DateCreated'].timestamp() < time_delete:\n yield policy['PolicyId']\n<|end_body_2|>\n", "revision_id": "25c4159e71935a9903a41540c168992586c5ba0c", "skeleton": "<|skeleton|>\nclass NukeDlm:\n \"\"\"Abstract dlm nuke in a class.\"\"\"\n\n def __init__(self, region_name=None) -> None:\n \"\"\"Initialize dlm nuke.\"\"\"\n <|body_0|>\n\n def nuke(self, older_than_seconds: float) -> None:\n \"\"\"Dlm policies deleting function. Deleting all dlm policy resources with a timestamp greater than older_than_seconds. :param int older_than_seconds: The timestamp in seconds used from which the aws resource will be deleted\"\"\"\n <|body_1|>\n\n def list_policy(self, time_delete: float) -> Iterator[str]:\n \"\"\"Data Lifecycle Policies list function. Returns the IDs of all Data Lifecycle Policies with a timestamp lower than time_delete. :param int time_delete: Timestamp in seconds used for filter Data Lifecycle policies :yield Iterator[str]: Data Lifecycle policies IDs\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NukeDlm:\n \"\"\"Abstract dlm nuke in a class.\"\"\"\n\n def __init__(self, region_name=None) -> None:\n \"\"\"Initialize dlm nuke.\"\"\"\n self.dlm = AwsClient().connect('dlm', region_name)\n try:\n self.dlm.get_lifecycle_policies()\n except EndpointConnectionError:\n print('Dlm resource is not available in this aws region')\n return\n\n def nuke(self, older_than_seconds: float) -> None:\n \"\"\"Dlm policies deleting function. Deleting all dlm policy resources with a timestamp greater than older_than_seconds. :param int older_than_seconds: The timestamp in seconds used from which the aws resource will be deleted\"\"\"\n for policy in self.list_policy(older_than_seconds):\n try:\n self.dlm.delete_lifecycle_policy(PolicyId=policy)\n print('Nuke dlm Lifecycle Policy {0}'.format(policy))\n except ClientError as exc:\n nuke_exceptions('dlm policy', policy, exc)\n\n def list_policy(self, time_delete: float) -> Iterator[str]:\n \"\"\"Data Lifecycle Policies list function. Returns the IDs of all Data Lifecycle Policies with a timestamp lower than time_delete. :param int time_delete: Timestamp in seconds used for filter Data Lifecycle policies :yield Iterator[str]: Data Lifecycle policies IDs\"\"\"\n response = self.dlm.get_lifecycle_policies()\n for policy in response['Policies']:\n detailed = self.dlm.get_lifecycle_policy(PolicyId=policy['PolicyId'])\n if detailed['Policy']['DateCreated'].timestamp() < time_delete:\n yield policy['PolicyId']\n", "source": "the_stack_v2_python_sparse", "source_path": "package/nuke/compute/dlm.py", "source_repo": "diodonfrost/terraform-aws-lambda-nuke", "split": "test", "star_events_count": 20} {"blob_id": "58717d1734286c57b86f8ce407fd87bf4ee79d17", "bodies": ["username = self.cleaned_data['username']\nif User.objects.filter(username=username):\n raise forms.ValidationError('Este Correo ya se encuentra registrado')\nreturn username", "nickname = self.cleaned_data['nickname']\nif UserProfile.objects.filter(nickname=nickname):\n raise forms.ValidationError('Este nombre de usuario ya existe')\nreturn nickname", "password = self.cleaned_data['password']\npassword2 = self.cleaned_data['password2']\nif password != password2:\n raise forms.ValidationError('Las contraseñas deben ser iguales')\nreturn password2"], "bodies_text": "<|body_start_0|>\n username = self.cleaned_data['username']\n if User.objects.filter(username=username):\n raise forms.ValidationError('Este Correo ya se encuentra registrado')\n return username\n<|end_body_0|>\n\n<|body_start_1|>\n nickname = self.cleaned_data['nickname']\n if UserProfile.objects.filter(nickname=nickname):\n raise forms.ValidationError('Este nombre de usuario ya existe')\n return nickname\n<|end_body_1|>\n\n<|body_start_2|>\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Las contraseñas deben ser iguales')\n return password2\n<|end_body_2|>\n", "class_docstring": "", "class_name": "signup_form", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass signup_form:\n\n def clean_username(self):\n \"\"\"Comprueba que no exista un username igual en la db\"\"\"\n <|body_0|>\n\n def clean_nickname(self):\n \"\"\"Comprueba que no exista un email igual en la db\"\"\"\n <|body_1|>\n\n def clean_password2(self):\n \"\"\"Comprueba que password y password2 sean iguales.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = self.cleaned_data['username']\n if User.objects.filter(username=username):\n raise forms.ValidationError('Este Correo ya se encuentra registrado')\n return username\n<|end_body_0|>\n\n<|body_start_1|>\n nickname = self.cleaned_data['nickname']\n if UserProfile.objects.filter(nickname=nickname):\n raise forms.ValidationError('Este nombre de usuario ya existe')\n return nickname\n<|end_body_1|>\n\n<|body_start_2|>\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Las contraseñas deben ser iguales')\n return password2\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000469", "length_bytes": 5427, "license_type": "no_license", "methods": [{"docstring": "Comprueba que no exista un username igual en la db", "name": "clean_username", "signature": "def clean_username(self)"}, {"docstring": "Comprueba que no exista un email igual en la db", "name": "clean_nickname", "signature": "def clean_nickname(self)"}, {"docstring": "Comprueba que password y password2 sean iguales.", "name": "clean_password2", "signature": "def clean_password2(self)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_028221", "prompt": "Implement the Python class `signup_form` described below.\n\nClass description:\nImplement the signup_form class.\n\nMethod signatures and docstrings:\n- def clean_username(self): Comprueba que no exista un username igual en la db\n- def clean_nickname(self): Comprueba que no exista un email igual en la db\n- def clean_password2(self): Comprueba que password y password2 sean iguales.", "prompted_full_text": "Implement the Python class `signup_form` described below.\n\nClass description:\nImplement the signup_form class.\n\nMethod signatures and docstrings:\n- def clean_username(self): Comprueba que no exista un username igual en la db\n- def clean_nickname(self): Comprueba que no exista un email igual en la db\n- def clean_password2(self): Comprueba que password y password2 sean iguales.\n\n<|skeleton|>\nclass signup_form:\n\n def clean_username(self):\n \"\"\"Comprueba que no exista un username igual en la db\"\"\"\n <|body_0|>\n\n def clean_nickname(self):\n \"\"\"Comprueba que no exista un email igual en la db\"\"\"\n <|body_1|>\n\n def clean_password2(self):\n \"\"\"Comprueba que password y password2 sean iguales.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n username = self.cleaned_data['username']\n if User.objects.filter(username=username):\n raise forms.ValidationError('Este Correo ya se encuentra registrado')\n return username\n<|end_body_0|>\n\n<|body_start_1|>\n nickname = self.cleaned_data['nickname']\n if UserProfile.objects.filter(nickname=nickname):\n raise forms.ValidationError('Este nombre de usuario ya existe')\n return nickname\n<|end_body_1|>\n\n<|body_start_2|>\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Las contraseñas deben ser iguales')\n return password2\n<|end_body_2|>\n", "revision_id": "5a2da8642d6c0be1933373131cc1895e68d7a575", "skeleton": "<|skeleton|>\nclass signup_form:\n\n def clean_username(self):\n \"\"\"Comprueba que no exista un username igual en la db\"\"\"\n <|body_0|>\n\n def clean_nickname(self):\n \"\"\"Comprueba que no exista un email igual en la db\"\"\"\n <|body_1|>\n\n def clean_password2(self):\n \"\"\"Comprueba que password y password2 sean iguales.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class signup_form:\n def clean_username(self):\n \"\"\"Comprueba que no exista un username igual en la db\"\"\"\n username = self.cleaned_data['username']\n if User.objects.filter(username=username):\n raise forms.ValidationError('Este Correo ya se encuentra registrado')\n return username\n\n def clean_nickname(self):\n \"\"\"Comprueba que no exista un email igual en la db\"\"\"\n nickname = self.cleaned_data['nickname']\n if UserProfile.objects.filter(nickname=nickname):\n raise forms.ValidationError('Este nombre de usuario ya existe')\n return nickname\n\n def clean_password2(self):\n \"\"\"Comprueba que password y password2 sean iguales.\"\"\"\n password = self.cleaned_data['password']\n password2 = self.cleaned_data['password2']\n if password != password2:\n raise forms.ValidationError('Las contraseñas deben ser iguales')\n return password2\n", "source": "the_stack_v2_python_sparse", "source_path": "app/forms.py", "source_repo": "almaginetx/proyectouvas", "split": "test", "star_events_count": 1} {"blob_id": "36935fe9f6bc942fcc7746228463fa316bf53ba4", "bodies": ["super().__init__()\nself.lstm_cell = StackedLSTMCell(num_layers, input_size, hidden_size)\nself.out = nn.Linear(hidden_size, input_size)", "batch_size = init_hidden[0].size(1)\nhidden_size = init_hidden[0].size(2)\nx = Variable(torch.zeros(batch_size, hidden_size)).cuda()\nh, c = init_hidden\nout_features = []\nfor i in range(seq_len):\n (last_h, last_c), (h, c) = self.lstm_cell(x, (h, c))\n x = self.out(last_h)\n out_features.append(last_h)\nreturn out_features"], "bodies_text": "<|body_start_0|>\n super().__init__()\n self.lstm_cell = StackedLSTMCell(num_layers, input_size, hidden_size)\n self.out = nn.Linear(hidden_size, input_size)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = init_hidden[0].size(1)\n hidden_size = init_hidden[0].size(2)\n x = Variable(torch.zeros(batch_size, hidden_size)).cuda()\n h, c = init_hidden\n out_features = []\n for i in range(seq_len):\n (last_h, last_c), (h, c) = self.lstm_cell(x, (h, c))\n x = self.out(last_h)\n out_features.append(last_h)\n return out_features\n<|end_body_1|>\n", "class_docstring": "", "class_name": "dLSTM", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass dLSTM:\n\n def __init__(self, input_size=2048, hidden_size=2048, num_layers=2):\n \"\"\"Decoder LSTM\"\"\"\n <|body_0|>\n\n def forward(self, seq_len, init_hidden):\n \"\"\"Args: seq_len (int) init_hidden h [num_layers=2, 1, hidden_size] c [num_layers=2, 1, hidden_size] Return: out_features: [seq_len, 1, hidden_size]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.lstm_cell = StackedLSTMCell(num_layers, input_size, hidden_size)\n self.out = nn.Linear(hidden_size, input_size)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = init_hidden[0].size(1)\n hidden_size = init_hidden[0].size(2)\n x = Variable(torch.zeros(batch_size, hidden_size)).cuda()\n h, c = init_hidden\n out_features = []\n for i in range(seq_len):\n (last_h, last_c), (h, c) = self.lstm_cell(x, (h, c))\n x = self.out(last_h)\n out_features.append(last_h)\n return out_features\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000470", "length_bytes": 5542, "license_type": "no_license", "methods": [{"docstring": "Decoder LSTM", "name": "__init__", "signature": "def __init__(self, input_size=2048, hidden_size=2048, num_layers=2)"}, {"docstring": "Args: seq_len (int) init_hidden h [num_layers=2, 1, hidden_size] c [num_layers=2, 1, hidden_size] Return: out_features: [seq_len, 1, hidden_size]", "name": "forward", "signature": "def forward(self, seq_len, init_hidden)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020379", "prompt": "Implement the Python class `dLSTM` described below.\n\nClass description:\nImplement the dLSTM class.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size=2048, hidden_size=2048, num_layers=2): Decoder LSTM\n- def forward(self, seq_len, init_hidden): Args: seq_len (int) init_hidden h [num_layers=2, 1, hidden_size] c [num_layers=2, 1, hidden_size] Return: out_features: [seq_len, 1, hidden_size]", "prompted_full_text": "Implement the Python class `dLSTM` described below.\n\nClass description:\nImplement the dLSTM class.\n\nMethod signatures and docstrings:\n- def __init__(self, input_size=2048, hidden_size=2048, num_layers=2): Decoder LSTM\n- def forward(self, seq_len, init_hidden): Args: seq_len (int) init_hidden h [num_layers=2, 1, hidden_size] c [num_layers=2, 1, hidden_size] Return: out_features: [seq_len, 1, hidden_size]\n\n<|skeleton|>\nclass dLSTM:\n\n def __init__(self, input_size=2048, hidden_size=2048, num_layers=2):\n \"\"\"Decoder LSTM\"\"\"\n <|body_0|>\n\n def forward(self, seq_len, init_hidden):\n \"\"\"Args: seq_len (int) init_hidden h [num_layers=2, 1, hidden_size] c [num_layers=2, 1, hidden_size] Return: out_features: [seq_len, 1, hidden_size]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__()\n self.lstm_cell = StackedLSTMCell(num_layers, input_size, hidden_size)\n self.out = nn.Linear(hidden_size, input_size)\n<|end_body_0|>\n\n<|body_start_1|>\n batch_size = init_hidden[0].size(1)\n hidden_size = init_hidden[0].size(2)\n x = Variable(torch.zeros(batch_size, hidden_size)).cuda()\n h, c = init_hidden\n out_features = []\n for i in range(seq_len):\n (last_h, last_c), (h, c) = self.lstm_cell(x, (h, c))\n x = self.out(last_h)\n out_features.append(last_h)\n return out_features\n<|end_body_1|>\n", "revision_id": "fb6d5bf70479373f96f2d944c672af8286c9bc89", "skeleton": "<|skeleton|>\nclass dLSTM:\n\n def __init__(self, input_size=2048, hidden_size=2048, num_layers=2):\n \"\"\"Decoder LSTM\"\"\"\n <|body_0|>\n\n def forward(self, seq_len, init_hidden):\n \"\"\"Args: seq_len (int) init_hidden h [num_layers=2, 1, hidden_size] c [num_layers=2, 1, hidden_size] Return: out_features: [seq_len, 1, hidden_size]\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class dLSTM:\n def __init__(self, input_size=2048, hidden_size=2048, num_layers=2):\n \"\"\"Decoder LSTM\"\"\"\n super().__init__()\n self.lstm_cell = StackedLSTMCell(num_layers, input_size, hidden_size)\n self.out = nn.Linear(hidden_size, input_size)\n\n def forward(self, seq_len, init_hidden):\n \"\"\"Args: seq_len (int) init_hidden h [num_layers=2, 1, hidden_size] c [num_layers=2, 1, hidden_size] Return: out_features: [seq_len, 1, hidden_size]\"\"\"\n batch_size = init_hidden[0].size(1)\n hidden_size = init_hidden[0].size(2)\n x = Variable(torch.zeros(batch_size, hidden_size)).cuda()\n h, c = init_hidden\n out_features = []\n for i in range(seq_len):\n (last_h, last_c), (h, c) = self.lstm_cell(x, (h, c))\n x = self.out(last_h)\n out_features.append(last_h)\n return out_features\n", "source": "the_stack_v2_python_sparse", "source_path": "layers/summarizer.py", "source_repo": "j-min/Adversarial_Video_Summary", "split": "test", "star_events_count": 253} {"blob_id": "bcdf399eaf1d0a6adfde3ce1a14e875e0b3c1b99", "bodies": ["if not root:\n return str([])\nqueue = [root]\nres = []\nwhile queue:\n curr = queue.pop(0)\n res.append(curr.val if curr else None)\n if not curr:\n continue\n queue.append(curr.left)\n queue.append(curr.right)\nreturn str(res)", "str_list = eval(data)\nif not str_list:\n return None\nroot_val = str_list.pop(0)\nroot = TreeNode(root_val)\nqueue = [root]\nwhile queue:\n curr = queue.pop(0)\n if not curr:\n continue\n left_val = str_list.pop(0)\n right_val = str_list.pop(0)\n left = TreeNode(left_val) if left_val != None else None\n right = TreeNode(right_val) if right_val != None else None\n curr.left = left\n curr.right = right\n queue.append(left)\n queue.append(right)\nreturn root"], "bodies_text": "<|body_start_0|>\n if not root:\n return str([])\n queue = [root]\n res = []\n while queue:\n curr = queue.pop(0)\n res.append(curr.val if curr else None)\n if not curr:\n continue\n queue.append(curr.left)\n queue.append(curr.right)\n return str(res)\n<|end_body_0|>\n\n<|body_start_1|>\n str_list = eval(data)\n if not str_list:\n return None\n root_val = str_list.pop(0)\n root = TreeNode(root_val)\n queue = [root]\n while queue:\n curr = queue.pop(0)\n if not curr:\n continue\n left_val = str_list.pop(0)\n right_val = str_list.pop(0)\n left = TreeNode(left_val) if left_val != None else None\n right = TreeNode(right_val) if right_val != None else None\n curr.left = left\n curr.right = right\n queue.append(left)\n queue.append(right)\n return root\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Codec", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return str([])\n queue = [root]\n res = []\n while queue:\n curr = queue.pop(0)\n res.append(curr.val if curr else None)\n if not curr:\n continue\n queue.append(curr.left)\n queue.append(curr.right)\n return str(res)\n<|end_body_0|>\n\n<|body_start_1|>\n str_list = eval(data)\n if not str_list:\n return None\n root_val = str_list.pop(0)\n root = TreeNode(root_val)\n queue = [root]\n while queue:\n curr = queue.pop(0)\n if not curr:\n continue\n left_val = str_list.pop(0)\n right_val = str_list.pop(0)\n left = TreeNode(left_val) if left_val != None else None\n right = TreeNode(right_val) if right_val != None else None\n curr.left = left\n curr.right = right\n queue.append(left)\n queue.append(right)\n return root\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000471", "length_bytes": 1559, "license_type": "no_license", "methods": [{"docstring": "Encodes a tree to a single string. :type root: TreeNode :rtype: str", "name": "serialize", "signature": "def serialize(self, root)"}, {"docstring": "Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "name": "deserialize", "signature": "def deserialize(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036421", "prompt": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode", "prompted_full_text": "Implement the Python class `Codec` described below.\n\nClass description:\nImplement the Codec class.\n\nMethod signatures and docstrings:\n- def serialize(self, root): Encodes a tree to a single string. :type root: TreeNode :rtype: str\n- def deserialize(self, data): Decodes your encoded data to tree. :type data: str :rtype: TreeNode\n\n<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if not root:\n return str([])\n queue = [root]\n res = []\n while queue:\n curr = queue.pop(0)\n res.append(curr.val if curr else None)\n if not curr:\n continue\n queue.append(curr.left)\n queue.append(curr.right)\n return str(res)\n<|end_body_0|>\n\n<|body_start_1|>\n str_list = eval(data)\n if not str_list:\n return None\n root_val = str_list.pop(0)\n root = TreeNode(root_val)\n queue = [root]\n while queue:\n curr = queue.pop(0)\n if not curr:\n continue\n left_val = str_list.pop(0)\n right_val = str_list.pop(0)\n left = TreeNode(left_val) if left_val != None else None\n right = TreeNode(right_val) if right_val != None else None\n curr.left = left\n curr.right = right\n queue.append(left)\n queue.append(right)\n return root\n<|end_body_1|>\n", "revision_id": "0cc7ad64891a23e348c8214f806a2820ac8c9e0a", "skeleton": "<|skeleton|>\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n <|body_0|>\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Codec:\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string. :type root: TreeNode :rtype: str\"\"\"\n if not root:\n return str([])\n queue = [root]\n res = []\n while queue:\n curr = queue.pop(0)\n res.append(curr.val if curr else None)\n if not curr:\n continue\n queue.append(curr.left)\n queue.append(curr.right)\n return str(res)\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree. :type data: str :rtype: TreeNode\"\"\"\n str_list = eval(data)\n if not str_list:\n return None\n root_val = str_list.pop(0)\n root = TreeNode(root_val)\n queue = [root]\n while queue:\n curr = queue.pop(0)\n if not curr:\n continue\n left_val = str_list.pop(0)\n right_val = str_list.pop(0)\n left = TreeNode(left_val) if left_val != None else None\n right = TreeNode(right_val) if right_val != None else None\n curr.left = left\n curr.right = right\n queue.append(left)\n queue.append(right)\n return root\n", "source": "the_stack_v2_python_sparse", "source_path": "Top Interview Questions/297. Serialize and Deserialize Binary Tree/solution.py", "source_repo": "jw3329/leetcode-problem-solving", "split": "test", "star_events_count": 0} {"blob_id": "1bbb11067df0cc8bbc821fc2c314494b163dbe65", "bodies": ["super(ProgressScene, self).__init__()\nself._page = 0\nself._num_pages = math.ceil(len(get_prize_names()) / (self.ROWS * self.COLUMNS))\nself._key_press_time = 0", "super().update(dt)\nkeys = pygame.key.get_pressed()\nif keys[pygame.K_a] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = max(self._page - 1, 0)\nelif keys[pygame.K_d] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = min(self._page + 1, self._num_pages - 1)", "super().render(surface)\nr, c = (0, 0)\nstart_idx = self._page * self.COLUMNS * self.ROWS\nend_idx = start_idx + self.COLUMNS * self.ROWS\nnames = get_prize_names()\nfor i in range(start_idx, end_idx):\n if i >= len(names):\n break\n elif c >= self.COLUMNS:\n r += 1\n c = 0\n x = c * (self.CELL_SIZE + self.MARGIN_X) + (surface.get_width() / 2 - (self.COLUMNS - 0.25) * (self.CELL_SIZE + self.MARGIN_X) / 2)\n y = r * (self.CELL_SIZE + self.MARGIN_Y) + (surface.get_height() / 2 - (self.ROWS - 0.25) * (self.CELL_SIZE + self.MARGIN_Y) / 2)\n c += 1\n prize_name = names[i]\n count = get_prize_count(prize_name)\n image = get_prize_image(prize_name)\n image = pygame.transform.smoothscale(image, (self.CELL_SIZE, self.CELL_SIZE))\n surface.blit(image, (x, y))\n draw_text(surface, prize_name, 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE))\n draw_text(surface, f'{count}', 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE + 20))\ndraw_text(surface, f'Page {self._page + 1} / {self._num_pages}', 'Comic Sans MS', 20, (255, 255, 255), (0, 0))"], "bodies_text": "<|body_start_0|>\n super(ProgressScene, self).__init__()\n self._page = 0\n self._num_pages = math.ceil(len(get_prize_names()) / (self.ROWS * self.COLUMNS))\n self._key_press_time = 0\n<|end_body_0|>\n\n<|body_start_1|>\n super().update(dt)\n keys = pygame.key.get_pressed()\n if keys[pygame.K_a] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = max(self._page - 1, 0)\n elif keys[pygame.K_d] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = min(self._page + 1, self._num_pages - 1)\n<|end_body_1|>\n\n<|body_start_2|>\n super().render(surface)\n r, c = (0, 0)\n start_idx = self._page * self.COLUMNS * self.ROWS\n end_idx = start_idx + self.COLUMNS * self.ROWS\n names = get_prize_names()\n for i in range(start_idx, end_idx):\n if i >= len(names):\n break\n elif c >= self.COLUMNS:\n r += 1\n c = 0\n x = c * (self.CELL_SIZE + self.MARGIN_X) + (surface.get_width() / 2 - (self.COLUMNS - 0.25) * (self.CELL_SIZE + self.MARGIN_X) / 2)\n y = r * (self.CELL_SIZE + self.MARGIN_Y) + (surface.get_height() / 2 - (self.ROWS - 0.25) * (self.CELL_SIZE + self.MARGIN_Y) / 2)\n c += 1\n prize_name = names[i]\n count = get_prize_count(prize_name)\n image = get_prize_image(prize_name)\n image = pygame.transform.smoothscale(image, (self.CELL_SIZE, self.CELL_SIZE))\n surface.blit(image, (x, y))\n draw_text(surface, prize_name, 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE))\n draw_text(surface, f'{count}', 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE + 20))\n draw_text(surface, f'Page {self._page + 1} / {self._num_pages}', 'Comic Sans MS', 20, (255, 255, 255), (0, 0))\n<|end_body_2|>\n", "class_docstring": "", "class_name": "ProgressScene", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ProgressScene:\n\n def __init__(self):\n \"\"\"A scene containing images of all the pokemon and their names/win count.\"\"\"\n <|body_0|>\n\n def update(self, dt: float):\n \"\"\"Updates which page is currently being shown based on key presses. Args: dt (float): the time in seconds since the last update\"\"\"\n <|body_1|>\n\n def render(self, surface: pygame.surface.Surface):\n \"\"\"Renders all the pokemon stuff to the surface. Args: surface (Surface): the surface to render to.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ProgressScene, self).__init__()\n self._page = 0\n self._num_pages = math.ceil(len(get_prize_names()) / (self.ROWS * self.COLUMNS))\n self._key_press_time = 0\n<|end_body_0|>\n\n<|body_start_1|>\n super().update(dt)\n keys = pygame.key.get_pressed()\n if keys[pygame.K_a] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = max(self._page - 1, 0)\n elif keys[pygame.K_d] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = min(self._page + 1, self._num_pages - 1)\n<|end_body_1|>\n\n<|body_start_2|>\n super().render(surface)\n r, c = (0, 0)\n start_idx = self._page * self.COLUMNS * self.ROWS\n end_idx = start_idx + self.COLUMNS * self.ROWS\n names = get_prize_names()\n for i in range(start_idx, end_idx):\n if i >= len(names):\n break\n elif c >= self.COLUMNS:\n r += 1\n c = 0\n x = c * (self.CELL_SIZE + self.MARGIN_X) + (surface.get_width() / 2 - (self.COLUMNS - 0.25) * (self.CELL_SIZE + self.MARGIN_X) / 2)\n y = r * (self.CELL_SIZE + self.MARGIN_Y) + (surface.get_height() / 2 - (self.ROWS - 0.25) * (self.CELL_SIZE + self.MARGIN_Y) / 2)\n c += 1\n prize_name = names[i]\n count = get_prize_count(prize_name)\n image = get_prize_image(prize_name)\n image = pygame.transform.smoothscale(image, (self.CELL_SIZE, self.CELL_SIZE))\n surface.blit(image, (x, y))\n draw_text(surface, prize_name, 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE))\n draw_text(surface, f'{count}', 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE + 20))\n draw_text(surface, f'Page {self._page + 1} / {self._num_pages}', 'Comic Sans MS', 20, (255, 255, 255), (0, 0))\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000472", "length_bytes": 2910, "license_type": "no_license", "methods": [{"docstring": "A scene containing images of all the pokemon and their names/win count.", "name": "__init__", "signature": "def __init__(self)"}, {"docstring": "Updates which page is currently being shown based on key presses. Args: dt (float): the time in seconds since the last update", "name": "update", "signature": "def update(self, dt: float)"}, {"docstring": "Renders all the pokemon stuff to the surface. Args: surface (Surface): the surface to render to.", "name": "render", "signature": "def render(self, surface: pygame.surface.Surface)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_044597", "prompt": "Implement the Python class `ProgressScene` described below.\n\nClass description:\nImplement the ProgressScene class.\n\nMethod signatures and docstrings:\n- def __init__(self): A scene containing images of all the pokemon and their names/win count.\n- def update(self, dt: float): Updates which page is currently being shown based on key presses. Args: dt (float): the time in seconds since the last update\n- def render(self, surface: pygame.surface.Surface): Renders all the pokemon stuff to the surface. Args: surface (Surface): the surface to render to.", "prompted_full_text": "Implement the Python class `ProgressScene` described below.\n\nClass description:\nImplement the ProgressScene class.\n\nMethod signatures and docstrings:\n- def __init__(self): A scene containing images of all the pokemon and their names/win count.\n- def update(self, dt: float): Updates which page is currently being shown based on key presses. Args: dt (float): the time in seconds since the last update\n- def render(self, surface: pygame.surface.Surface): Renders all the pokemon stuff to the surface. Args: surface (Surface): the surface to render to.\n\n<|skeleton|>\nclass ProgressScene:\n\n def __init__(self):\n \"\"\"A scene containing images of all the pokemon and their names/win count.\"\"\"\n <|body_0|>\n\n def update(self, dt: float):\n \"\"\"Updates which page is currently being shown based on key presses. Args: dt (float): the time in seconds since the last update\"\"\"\n <|body_1|>\n\n def render(self, surface: pygame.surface.Surface):\n \"\"\"Renders all the pokemon stuff to the surface. Args: surface (Surface): the surface to render to.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(ProgressScene, self).__init__()\n self._page = 0\n self._num_pages = math.ceil(len(get_prize_names()) / (self.ROWS * self.COLUMNS))\n self._key_press_time = 0\n<|end_body_0|>\n\n<|body_start_1|>\n super().update(dt)\n keys = pygame.key.get_pressed()\n if keys[pygame.K_a] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = max(self._page - 1, 0)\n elif keys[pygame.K_d] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = min(self._page + 1, self._num_pages - 1)\n<|end_body_1|>\n\n<|body_start_2|>\n super().render(surface)\n r, c = (0, 0)\n start_idx = self._page * self.COLUMNS * self.ROWS\n end_idx = start_idx + self.COLUMNS * self.ROWS\n names = get_prize_names()\n for i in range(start_idx, end_idx):\n if i >= len(names):\n break\n elif c >= self.COLUMNS:\n r += 1\n c = 0\n x = c * (self.CELL_SIZE + self.MARGIN_X) + (surface.get_width() / 2 - (self.COLUMNS - 0.25) * (self.CELL_SIZE + self.MARGIN_X) / 2)\n y = r * (self.CELL_SIZE + self.MARGIN_Y) + (surface.get_height() / 2 - (self.ROWS - 0.25) * (self.CELL_SIZE + self.MARGIN_Y) / 2)\n c += 1\n prize_name = names[i]\n count = get_prize_count(prize_name)\n image = get_prize_image(prize_name)\n image = pygame.transform.smoothscale(image, (self.CELL_SIZE, self.CELL_SIZE))\n surface.blit(image, (x, y))\n draw_text(surface, prize_name, 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE))\n draw_text(surface, f'{count}', 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE + 20))\n draw_text(surface, f'Page {self._page + 1} / {self._num_pages}', 'Comic Sans MS', 20, (255, 255, 255), (0, 0))\n<|end_body_2|>\n", "revision_id": "115e2ea23e0b7aba41a90ef07d0a239314f1d6cf", "skeleton": "<|skeleton|>\nclass ProgressScene:\n\n def __init__(self):\n \"\"\"A scene containing images of all the pokemon and their names/win count.\"\"\"\n <|body_0|>\n\n def update(self, dt: float):\n \"\"\"Updates which page is currently being shown based on key presses. Args: dt (float): the time in seconds since the last update\"\"\"\n <|body_1|>\n\n def render(self, surface: pygame.surface.Surface):\n \"\"\"Renders all the pokemon stuff to the surface. Args: surface (Surface): the surface to render to.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ProgressScene:\n def __init__(self):\n \"\"\"A scene containing images of all the pokemon and their names/win count.\"\"\"\n super(ProgressScene, self).__init__()\n self._page = 0\n self._num_pages = math.ceil(len(get_prize_names()) / (self.ROWS * self.COLUMNS))\n self._key_press_time = 0\n\n def update(self, dt: float):\n \"\"\"Updates which page is currently being shown based on key presses. Args: dt (float): the time in seconds since the last update\"\"\"\n super().update(dt)\n keys = pygame.key.get_pressed()\n if keys[pygame.K_a] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = max(self._page - 1, 0)\n elif keys[pygame.K_d] and time.time() - self._key_press_time > 0.25:\n self._key_press_time = time.time()\n self._page = min(self._page + 1, self._num_pages - 1)\n\n def render(self, surface: pygame.surface.Surface):\n \"\"\"Renders all the pokemon stuff to the surface. Args: surface (Surface): the surface to render to.\"\"\"\n super().render(surface)\n r, c = (0, 0)\n start_idx = self._page * self.COLUMNS * self.ROWS\n end_idx = start_idx + self.COLUMNS * self.ROWS\n names = get_prize_names()\n for i in range(start_idx, end_idx):\n if i >= len(names):\n break\n elif c >= self.COLUMNS:\n r += 1\n c = 0\n x = c * (self.CELL_SIZE + self.MARGIN_X) + (surface.get_width() / 2 - (self.COLUMNS - 0.25) * (self.CELL_SIZE + self.MARGIN_X) / 2)\n y = r * (self.CELL_SIZE + self.MARGIN_Y) + (surface.get_height() / 2 - (self.ROWS - 0.25) * (self.CELL_SIZE + self.MARGIN_Y) / 2)\n c += 1\n prize_name = names[i]\n count = get_prize_count(prize_name)\n image = get_prize_image(prize_name)\n image = pygame.transform.smoothscale(image, (self.CELL_SIZE, self.CELL_SIZE))\n surface.blit(image, (x, y))\n draw_text(surface, prize_name, 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE))\n draw_text(surface, f'{count}', 'Comic Sans MS', 20, (255, 255, 255), (x, y + self.CELL_SIZE + 20))\n draw_text(surface, f'Page {self._page + 1} / {self._num_pages}', 'Comic Sans MS', 20, (255, 255, 255), (0, 0))\n", "source": "the_stack_v2_python_sparse", "source_path": "crane/game/scene/progress_scene/progress_scene.py", "source_repo": "mtmk-ee/crane-game", "split": "test", "star_events_count": 0} {"blob_id": "f0afe8ad841327bbfd24f44be6d8dda3e0623e57", "bodies": ["context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')", "context.set_code(grpc.StatusCode.UNIMPLEMENTED)\ncontext.set_details('Method not implemented!')\nraise NotImplementedError('Method not implemented!')"], "bodies_text": "<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "class_docstring": "Missing associated documentation comment in .proto file.", "class_name": "CybosPlusProxyServiceServicer", "detected_licenses": ["Apache-2.0", "GPL-1.0-or-later", "GPL-3.0-or-later", "GPL-3.0-only", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CybosPlusProxyServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def Dispatch(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def Property(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def Method(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def Event(self, request_iterator, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000473", "length_bytes": 8133, "license_type": "permissive", "methods": [{"docstring": "Missing associated documentation comment in .proto file.", "name": "Dispatch", "signature": "def Dispatch(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "Property", "signature": "def Property(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "Method", "signature": "def Method(self, request, context)"}, {"docstring": "Missing associated documentation comment in .proto file.", "name": "Event", "signature": "def Event(self, request_iterator, context)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_val_000628", "prompt": "Implement the Python class `CybosPlusProxyServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def Dispatch(self, request, context): Missing associated documentation comment in .proto file.\n- def Property(self, request, context): Missing associated documentation comment in .proto file.\n- def Method(self, request, context): Missing associated documentation comment in .proto file.\n- def Event(self, request_iterator, context): Missing associated documentation comment in .proto file.", "prompted_full_text": "Implement the Python class `CybosPlusProxyServiceServicer` described below.\n\nClass description:\nMissing associated documentation comment in .proto file.\n\nMethod signatures and docstrings:\n- def Dispatch(self, request, context): Missing associated documentation comment in .proto file.\n- def Property(self, request, context): Missing associated documentation comment in .proto file.\n- def Method(self, request, context): Missing associated documentation comment in .proto file.\n- def Event(self, request_iterator, context): Missing associated documentation comment in .proto file.\n\n<|skeleton|>\nclass CybosPlusProxyServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def Dispatch(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def Property(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def Method(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def Event(self, request_iterator, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_0|>\n\n<|body_start_1|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_1|>\n\n<|body_start_2|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_2|>\n\n<|body_start_3|>\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n<|end_body_3|>\n", "revision_id": "2d0e5004074f6a7f62c0301ce1a5b7b0f2037204", "skeleton": "<|skeleton|>\nclass CybosPlusProxyServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def Dispatch(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_0|>\n\n def Property(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_1|>\n\n def Method(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_2|>\n\n def Event(self, request_iterator, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CybosPlusProxyServiceServicer:\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n\n def Dispatch(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Property(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Method(self, request, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n\n def Event(self, request_iterator, context):\n \"\"\"Missing associated documentation comment in .proto file.\"\"\"\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')\n", "source": "the_stack_v2_python_sparse", "source_path": "koapy/backend/daishin_cybos_plus/proxy/CybosPlusProxyService_pb2_grpc.py", "source_repo": "gomtinQQ/koapy", "split": "test", "star_events_count": 0} {"blob_id": "11dff08c6b2881888610fb3008725eec41a3c9cc", "bodies": ["super().__init__(n, mu, **kwargs)\nself.ro = ro\nself.last_e = 0\nself.last_x = np.zeros(n)\nself.last_fi = np.zeros(n)\nself.last_mu = mu", "fi_part = np.eye(self.n) - self.last_mu * np.outer(self.last_x, self.last_x)\nfi = np.dot(fi_part, self.last_fi) + self.last_e * self.last_x\nmu = self.last_mu + self.ro * e * np.dot(self.last_x, fi)\nself.last_e, self.last_mu, self.last_x, self.last_fi = (e, mu, x, fi)\nreturn mu * e * x"], "bodies_text": "<|body_start_0|>\n super().__init__(n, mu, **kwargs)\n self.ro = ro\n self.last_e = 0\n self.last_x = np.zeros(n)\n self.last_fi = np.zeros(n)\n self.last_mu = mu\n<|end_body_0|>\n\n<|body_start_1|>\n fi_part = np.eye(self.n) - self.last_mu * np.outer(self.last_x, self.last_x)\n fi = np.dot(fi_part, self.last_fi) + self.last_e * self.last_x\n mu = self.last_mu + self.ro * e * np.dot(self.last_x, fi)\n self.last_e, self.last_mu, self.last_x, self.last_fi = (e, mu, x, fi)\n return mu * e * x\n<|end_body_1|>\n", "class_docstring": "This class represents an adaptive VSLMS filter with Benveniste's adaptation.", "class_name": "FilterVSLMS_Benveniste", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FilterVSLMS_Benveniste:\n \"\"\"This class represents an adaptive VSLMS filter with Benveniste's adaptation.\"\"\"\n\n def __init__(self, n, mu=1.0, ro=0.1, **kwargs):\n \"\"\"**Kwargs:** * `ro` : step size adaptation parameter (float) at the beginning. It is an adaptive parameter.\"\"\"\n <|body_0|>\n\n def learning_rule(self, e, x):\n \"\"\"Override the parent class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(n, mu, **kwargs)\n self.ro = ro\n self.last_e = 0\n self.last_x = np.zeros(n)\n self.last_fi = np.zeros(n)\n self.last_mu = mu\n<|end_body_0|>\n\n<|body_start_1|>\n fi_part = np.eye(self.n) - self.last_mu * np.outer(self.last_x, self.last_x)\n fi = np.dot(fi_part, self.last_fi) + self.last_e * self.last_x\n mu = self.last_mu + self.ro * e * np.dot(self.last_x, fi)\n self.last_e, self.last_mu, self.last_x, self.last_fi = (e, mu, x, fi)\n return mu * e * x\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000474", "length_bytes": 2661, "license_type": "permissive", "methods": [{"docstring": "**Kwargs:** * `ro` : step size adaptation parameter (float) at the beginning. It is an adaptive parameter.", "name": "__init__", "signature": "def __init__(self, n, mu=1.0, ro=0.1, **kwargs)"}, {"docstring": "Override the parent class.", "name": "learning_rule", "signature": "def learning_rule(self, e, x)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_020926", "prompt": "Implement the Python class `FilterVSLMS_Benveniste` described below.\n\nClass description:\nThis class represents an adaptive VSLMS filter with Benveniste's adaptation.\n\nMethod signatures and docstrings:\n- def __init__(self, n, mu=1.0, ro=0.1, **kwargs): **Kwargs:** * `ro` : step size adaptation parameter (float) at the beginning. It is an adaptive parameter.\n- def learning_rule(self, e, x): Override the parent class.", "prompted_full_text": "Implement the Python class `FilterVSLMS_Benveniste` described below.\n\nClass description:\nThis class represents an adaptive VSLMS filter with Benveniste's adaptation.\n\nMethod signatures and docstrings:\n- def __init__(self, n, mu=1.0, ro=0.1, **kwargs): **Kwargs:** * `ro` : step size adaptation parameter (float) at the beginning. It is an adaptive parameter.\n- def learning_rule(self, e, x): Override the parent class.\n\n<|skeleton|>\nclass FilterVSLMS_Benveniste:\n \"\"\"This class represents an adaptive VSLMS filter with Benveniste's adaptation.\"\"\"\n\n def __init__(self, n, mu=1.0, ro=0.1, **kwargs):\n \"\"\"**Kwargs:** * `ro` : step size adaptation parameter (float) at the beginning. It is an adaptive parameter.\"\"\"\n <|body_0|>\n\n def learning_rule(self, e, x):\n \"\"\"Override the parent class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super().__init__(n, mu, **kwargs)\n self.ro = ro\n self.last_e = 0\n self.last_x = np.zeros(n)\n self.last_fi = np.zeros(n)\n self.last_mu = mu\n<|end_body_0|>\n\n<|body_start_1|>\n fi_part = np.eye(self.n) - self.last_mu * np.outer(self.last_x, self.last_x)\n fi = np.dot(fi_part, self.last_fi) + self.last_e * self.last_x\n mu = self.last_mu + self.ro * e * np.dot(self.last_x, fi)\n self.last_e, self.last_mu, self.last_x, self.last_fi = (e, mu, x, fi)\n return mu * e * x\n<|end_body_1|>\n", "revision_id": "17bfe965b7f6c03b499a5087a1813b18954c2898", "skeleton": "<|skeleton|>\nclass FilterVSLMS_Benveniste:\n \"\"\"This class represents an adaptive VSLMS filter with Benveniste's adaptation.\"\"\"\n\n def __init__(self, n, mu=1.0, ro=0.1, **kwargs):\n \"\"\"**Kwargs:** * `ro` : step size adaptation parameter (float) at the beginning. It is an adaptive parameter.\"\"\"\n <|body_0|>\n\n def learning_rule(self, e, x):\n \"\"\"Override the parent class.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FilterVSLMS_Benveniste:\n \"\"\"This class represents an adaptive VSLMS filter with Benveniste's adaptation.\"\"\"\n\n def __init__(self, n, mu=1.0, ro=0.1, **kwargs):\n \"\"\"**Kwargs:** * `ro` : step size adaptation parameter (float) at the beginning. It is an adaptive parameter.\"\"\"\n super().__init__(n, mu, **kwargs)\n self.ro = ro\n self.last_e = 0\n self.last_x = np.zeros(n)\n self.last_fi = np.zeros(n)\n self.last_mu = mu\n\n def learning_rule(self, e, x):\n \"\"\"Override the parent class.\"\"\"\n fi_part = np.eye(self.n) - self.last_mu * np.outer(self.last_x, self.last_x)\n fi = np.dot(fi_part, self.last_fi) + self.last_e * self.last_x\n mu = self.last_mu + self.ro * e * np.dot(self.last_x, fi)\n self.last_e, self.last_mu, self.last_x, self.last_fi = (e, mu, x, fi)\n return mu * e * x\n", "source": "the_stack_v2_python_sparse", "source_path": "padasip/filters/vslms_benveniste.py", "source_repo": "matousc89/padasip", "split": "test", "star_events_count": 269} {"blob_id": "21eed16a383edd5c27ef287a80c4ae234090a5ef", "bodies": ["AgentType.__init__(self, solution_terminal=deepcopy(Model.IndShockConsumerType.solution_terminal_), time_flow=time_flow, pseudo_terminal=False, **kwds)\nself.time_vary = deepcopy(Model.IndShockConsumerType.time_vary_)\nself.time_inv = deepcopy(Model.IndShockConsumerType.time_inv_)\nself.solveOnePeriod = Model.solveConsIndShock\nself.update()", "self.initializeSim()\nself.simConsHistory()\nself.W_history = self.pHist * self.bHist / self.Rfree\nif Params.do_lifecycle:\n self.W_history = self.W_history * self.cohort_scale\nself.kappa_history = 1.0 - (1.0 - self.MPChist) ** 4", "orig_flow = self.time_flow\nif self.cycles == 0:\n self.updateIncomeProcessAlt()\nelse:\n self.updateIncomeProcess()\nself.updateAssetsGrid()\nself.updateSolutionTerminal()\nself.timeFwd()\nself.resetRNG()\nif self.cycles > 0:\n self.IncomeDstn = Model.applyFlatIncomeTax(self.IncomeDstn, tax_rate=self.tax_rate, T_retire=self.T_retire, unemployed_indices=range(0, (self.TranShkCount + 1) * self.PermShkCount, self.TranShkCount + 1))\nself.makeIncShkHist()\nif not orig_flow:\n self.timeRev()", "tax_rate = self.IncUnemp * self.UnempPrb / (self.l_bar * (1.0 - self.UnempPrb))\nTranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount, sigma=self.TranShkStd[0], tail_N=0))\nTranShkDstn[0] = np.insert(TranShkDstn[0] * (1.0 - self.UnempPrb), 0, self.UnempPrb)\nTranShkDstn[1] = np.insert(self.l_bar * TranShkDstn[1] * (1.0 - tax_rate), 0, self.IncUnemp)\nPermShkDstn = approxMeanOneLognormal(self.PermShkCount, sigma=self.PermShkStd[0], tail_N=0)\nself.IncomeDstn = [combineIndepDstns(PermShkDstn, TranShkDstn)]\nself.TranShkDstn = TranShkDstn\nself.PermShkDstn = PermShkDstn\nself.addToTimeVary('IncomeDstn')"], "bodies_text": "<|body_start_0|>\n AgentType.__init__(self, solution_terminal=deepcopy(Model.IndShockConsumerType.solution_terminal_), time_flow=time_flow, pseudo_terminal=False, **kwds)\n self.time_vary = deepcopy(Model.IndShockConsumerType.time_vary_)\n self.time_inv = deepcopy(Model.IndShockConsumerType.time_inv_)\n self.solveOnePeriod = Model.solveConsIndShock\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.initializeSim()\n self.simConsHistory()\n self.W_history = self.pHist * self.bHist / self.Rfree\n if Params.do_lifecycle:\n self.W_history = self.W_history * self.cohort_scale\n self.kappa_history = 1.0 - (1.0 - self.MPChist) ** 4\n<|end_body_1|>\n\n<|body_start_2|>\n orig_flow = self.time_flow\n if self.cycles == 0:\n self.updateIncomeProcessAlt()\n else:\n self.updateIncomeProcess()\n self.updateAssetsGrid()\n self.updateSolutionTerminal()\n self.timeFwd()\n self.resetRNG()\n if self.cycles > 0:\n self.IncomeDstn = Model.applyFlatIncomeTax(self.IncomeDstn, tax_rate=self.tax_rate, T_retire=self.T_retire, unemployed_indices=range(0, (self.TranShkCount + 1) * self.PermShkCount, self.TranShkCount + 1))\n self.makeIncShkHist()\n if not orig_flow:\n self.timeRev()\n<|end_body_2|>\n\n<|body_start_3|>\n tax_rate = self.IncUnemp * self.UnempPrb / (self.l_bar * (1.0 - self.UnempPrb))\n TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount, sigma=self.TranShkStd[0], tail_N=0))\n TranShkDstn[0] = np.insert(TranShkDstn[0] * (1.0 - self.UnempPrb), 0, self.UnempPrb)\n TranShkDstn[1] = np.insert(self.l_bar * TranShkDstn[1] * (1.0 - tax_rate), 0, self.IncUnemp)\n PermShkDstn = approxMeanOneLognormal(self.PermShkCount, sigma=self.PermShkStd[0], tail_N=0)\n self.IncomeDstn = [combineIndepDstns(PermShkDstn, TranShkDstn)]\n self.TranShkDstn = TranShkDstn\n self.PermShkDstn = PermShkDstn\n self.addToTimeVary('IncomeDstn')\n<|end_body_3|>\n", "class_docstring": "A consumer type in the cstwMPC model; a slight modification of base ConsumerType.", "class_name": "cstwMPCagent", "detected_licenses": ["LicenseRef-scancode-warranty-disclaimer", "Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass cstwMPCagent:\n \"\"\"A consumer type in the cstwMPC model; a slight modification of base ConsumerType.\"\"\"\n\n def __init__(self, time_flow=True, **kwds):\n \"\"\"Make a new consumer type for the cstwMPC model. Parameters ---------- time_flow : boolean Indictator for whether time is \"flowing\" forward for this agent. **kwds : keyword arguments Any number of keyword arguments of the form key=value. Each value will be assigned to the attribute named in self. Returns ------- new instance of cstwMPCagent\"\"\"\n <|body_0|>\n\n def simulateCSTW(self):\n \"\"\"The simulation method for the no aggregate shocks version of the model. Initializes the agent type, simulates a history of state and control variables, and stores the wealth history in self.W_history and the annualized MPC history in self.kappa_history. Parameters ---------- none Returns ------- none\"\"\"\n <|body_1|>\n\n def update(self):\n \"\"\"Update the income process, the assets grid, and the terminal solution. Parameters ---------- none Returns ------- none\"\"\"\n <|body_2|>\n\n def updateIncomeProcessAlt(self):\n \"\"\"An alternative method for constructing the income process in the infinite horizon model, where the labor supply l_bar creates a small oddity. Parameters ---------- none Returns ------- none\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n AgentType.__init__(self, solution_terminal=deepcopy(Model.IndShockConsumerType.solution_terminal_), time_flow=time_flow, pseudo_terminal=False, **kwds)\n self.time_vary = deepcopy(Model.IndShockConsumerType.time_vary_)\n self.time_inv = deepcopy(Model.IndShockConsumerType.time_inv_)\n self.solveOnePeriod = Model.solveConsIndShock\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.initializeSim()\n self.simConsHistory()\n self.W_history = self.pHist * self.bHist / self.Rfree\n if Params.do_lifecycle:\n self.W_history = self.W_history * self.cohort_scale\n self.kappa_history = 1.0 - (1.0 - self.MPChist) ** 4\n<|end_body_1|>\n\n<|body_start_2|>\n orig_flow = self.time_flow\n if self.cycles == 0:\n self.updateIncomeProcessAlt()\n else:\n self.updateIncomeProcess()\n self.updateAssetsGrid()\n self.updateSolutionTerminal()\n self.timeFwd()\n self.resetRNG()\n if self.cycles > 0:\n self.IncomeDstn = Model.applyFlatIncomeTax(self.IncomeDstn, tax_rate=self.tax_rate, T_retire=self.T_retire, unemployed_indices=range(0, (self.TranShkCount + 1) * self.PermShkCount, self.TranShkCount + 1))\n self.makeIncShkHist()\n if not orig_flow:\n self.timeRev()\n<|end_body_2|>\n\n<|body_start_3|>\n tax_rate = self.IncUnemp * self.UnempPrb / (self.l_bar * (1.0 - self.UnempPrb))\n TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount, sigma=self.TranShkStd[0], tail_N=0))\n TranShkDstn[0] = np.insert(TranShkDstn[0] * (1.0 - self.UnempPrb), 0, self.UnempPrb)\n TranShkDstn[1] = np.insert(self.l_bar * TranShkDstn[1] * (1.0 - tax_rate), 0, self.IncUnemp)\n PermShkDstn = approxMeanOneLognormal(self.PermShkCount, sigma=self.PermShkStd[0], tail_N=0)\n self.IncomeDstn = [combineIndepDstns(PermShkDstn, TranShkDstn)]\n self.TranShkDstn = TranShkDstn\n self.PermShkDstn = PermShkDstn\n self.addToTimeVary('IncomeDstn')\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000475", "length_bytes": 40408, "license_type": "permissive", "methods": [{"docstring": "Make a new consumer type for the cstwMPC model. Parameters ---------- time_flow : boolean Indictator for whether time is \"flowing\" forward for this agent. **kwds : keyword arguments Any number of keyword arguments of the form key=value. Each value will be assigned to the attribute named in self. Returns ------- new instance of cstwMPCagent", "name": "__init__", "signature": "def __init__(self, time_flow=True, **kwds)"}, {"docstring": "The simulation method for the no aggregate shocks version of the model. Initializes the agent type, simulates a history of state and control variables, and stores the wealth history in self.W_history and the annualized MPC history in self.kappa_history. Parameters ---------- none Returns ------- none", "name": "simulateCSTW", "signature": "def simulateCSTW(self)"}, {"docstring": "Update the income process, the assets grid, and the terminal solution. Parameters ---------- none Returns ------- none", "name": "update", "signature": "def update(self)"}, {"docstring": "An alternative method for constructing the income process in the infinite horizon model, where the labor supply l_bar creates a small oddity. Parameters ---------- none Returns ------- none", "name": "updateIncomeProcessAlt", "signature": "def updateIncomeProcessAlt(self)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_000851", "prompt": "Implement the Python class `cstwMPCagent` described below.\n\nClass description:\nA consumer type in the cstwMPC model; a slight modification of base ConsumerType.\n\nMethod signatures and docstrings:\n- def __init__(self, time_flow=True, **kwds): Make a new consumer type for the cstwMPC model. Parameters ---------- time_flow : boolean Indictator for whether time is \"flowing\" forward for this agent. **kwds : keyword arguments Any number of keyword arguments of the form key=value. Each value will be assigned to the attribute named in self. Returns ------- new instance of cstwMPCagent\n- def simulateCSTW(self): The simulation method for the no aggregate shocks version of the model. Initializes the agent type, simulates a history of state and control variables, and stores the wealth history in self.W_history and the annualized MPC history in self.kappa_history. Parameters ---------- none Returns ------- none\n- def update(self): Update the income process, the assets grid, and the terminal solution. Parameters ---------- none Returns ------- none\n- def updateIncomeProcessAlt(self): An alternative method for constructing the income process in the infinite horizon model, where the labor supply l_bar creates a small oddity. Parameters ---------- none Returns ------- none", "prompted_full_text": "Implement the Python class `cstwMPCagent` described below.\n\nClass description:\nA consumer type in the cstwMPC model; a slight modification of base ConsumerType.\n\nMethod signatures and docstrings:\n- def __init__(self, time_flow=True, **kwds): Make a new consumer type for the cstwMPC model. Parameters ---------- time_flow : boolean Indictator for whether time is \"flowing\" forward for this agent. **kwds : keyword arguments Any number of keyword arguments of the form key=value. Each value will be assigned to the attribute named in self. Returns ------- new instance of cstwMPCagent\n- def simulateCSTW(self): The simulation method for the no aggregate shocks version of the model. Initializes the agent type, simulates a history of state and control variables, and stores the wealth history in self.W_history and the annualized MPC history in self.kappa_history. Parameters ---------- none Returns ------- none\n- def update(self): Update the income process, the assets grid, and the terminal solution. Parameters ---------- none Returns ------- none\n- def updateIncomeProcessAlt(self): An alternative method for constructing the income process in the infinite horizon model, where the labor supply l_bar creates a small oddity. Parameters ---------- none Returns ------- none\n\n<|skeleton|>\nclass cstwMPCagent:\n \"\"\"A consumer type in the cstwMPC model; a slight modification of base ConsumerType.\"\"\"\n\n def __init__(self, time_flow=True, **kwds):\n \"\"\"Make a new consumer type for the cstwMPC model. Parameters ---------- time_flow : boolean Indictator for whether time is \"flowing\" forward for this agent. **kwds : keyword arguments Any number of keyword arguments of the form key=value. Each value will be assigned to the attribute named in self. Returns ------- new instance of cstwMPCagent\"\"\"\n <|body_0|>\n\n def simulateCSTW(self):\n \"\"\"The simulation method for the no aggregate shocks version of the model. Initializes the agent type, simulates a history of state and control variables, and stores the wealth history in self.W_history and the annualized MPC history in self.kappa_history. Parameters ---------- none Returns ------- none\"\"\"\n <|body_1|>\n\n def update(self):\n \"\"\"Update the income process, the assets grid, and the terminal solution. Parameters ---------- none Returns ------- none\"\"\"\n <|body_2|>\n\n def updateIncomeProcessAlt(self):\n \"\"\"An alternative method for constructing the income process in the infinite horizon model, where the labor supply l_bar creates a small oddity. Parameters ---------- none Returns ------- none\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n AgentType.__init__(self, solution_terminal=deepcopy(Model.IndShockConsumerType.solution_terminal_), time_flow=time_flow, pseudo_terminal=False, **kwds)\n self.time_vary = deepcopy(Model.IndShockConsumerType.time_vary_)\n self.time_inv = deepcopy(Model.IndShockConsumerType.time_inv_)\n self.solveOnePeriod = Model.solveConsIndShock\n self.update()\n<|end_body_0|>\n\n<|body_start_1|>\n self.initializeSim()\n self.simConsHistory()\n self.W_history = self.pHist * self.bHist / self.Rfree\n if Params.do_lifecycle:\n self.W_history = self.W_history * self.cohort_scale\n self.kappa_history = 1.0 - (1.0 - self.MPChist) ** 4\n<|end_body_1|>\n\n<|body_start_2|>\n orig_flow = self.time_flow\n if self.cycles == 0:\n self.updateIncomeProcessAlt()\n else:\n self.updateIncomeProcess()\n self.updateAssetsGrid()\n self.updateSolutionTerminal()\n self.timeFwd()\n self.resetRNG()\n if self.cycles > 0:\n self.IncomeDstn = Model.applyFlatIncomeTax(self.IncomeDstn, tax_rate=self.tax_rate, T_retire=self.T_retire, unemployed_indices=range(0, (self.TranShkCount + 1) * self.PermShkCount, self.TranShkCount + 1))\n self.makeIncShkHist()\n if not orig_flow:\n self.timeRev()\n<|end_body_2|>\n\n<|body_start_3|>\n tax_rate = self.IncUnemp * self.UnempPrb / (self.l_bar * (1.0 - self.UnempPrb))\n TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount, sigma=self.TranShkStd[0], tail_N=0))\n TranShkDstn[0] = np.insert(TranShkDstn[0] * (1.0 - self.UnempPrb), 0, self.UnempPrb)\n TranShkDstn[1] = np.insert(self.l_bar * TranShkDstn[1] * (1.0 - tax_rate), 0, self.IncUnemp)\n PermShkDstn = approxMeanOneLognormal(self.PermShkCount, sigma=self.PermShkStd[0], tail_N=0)\n self.IncomeDstn = [combineIndepDstns(PermShkDstn, TranShkDstn)]\n self.TranShkDstn = TranShkDstn\n self.PermShkDstn = PermShkDstn\n self.addToTimeVary('IncomeDstn')\n<|end_body_3|>\n", "revision_id": "9e7d3cabd7885dedcc1ea49ac5bd5bdb08b4615a", "skeleton": "<|skeleton|>\nclass cstwMPCagent:\n \"\"\"A consumer type in the cstwMPC model; a slight modification of base ConsumerType.\"\"\"\n\n def __init__(self, time_flow=True, **kwds):\n \"\"\"Make a new consumer type for the cstwMPC model. Parameters ---------- time_flow : boolean Indictator for whether time is \"flowing\" forward for this agent. **kwds : keyword arguments Any number of keyword arguments of the form key=value. Each value will be assigned to the attribute named in self. Returns ------- new instance of cstwMPCagent\"\"\"\n <|body_0|>\n\n def simulateCSTW(self):\n \"\"\"The simulation method for the no aggregate shocks version of the model. Initializes the agent type, simulates a history of state and control variables, and stores the wealth history in self.W_history and the annualized MPC history in self.kappa_history. Parameters ---------- none Returns ------- none\"\"\"\n <|body_1|>\n\n def update(self):\n \"\"\"Update the income process, the assets grid, and the terminal solution. Parameters ---------- none Returns ------- none\"\"\"\n <|body_2|>\n\n def updateIncomeProcessAlt(self):\n \"\"\"An alternative method for constructing the income process in the infinite horizon model, where the labor supply l_bar creates a small oddity. Parameters ---------- none Returns ------- none\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class cstwMPCagent:\n \"\"\"A consumer type in the cstwMPC model; a slight modification of base ConsumerType.\"\"\"\n\n def __init__(self, time_flow=True, **kwds):\n \"\"\"Make a new consumer type for the cstwMPC model. Parameters ---------- time_flow : boolean Indictator for whether time is \"flowing\" forward for this agent. **kwds : keyword arguments Any number of keyword arguments of the form key=value. Each value will be assigned to the attribute named in self. Returns ------- new instance of cstwMPCagent\"\"\"\n AgentType.__init__(self, solution_terminal=deepcopy(Model.IndShockConsumerType.solution_terminal_), time_flow=time_flow, pseudo_terminal=False, **kwds)\n self.time_vary = deepcopy(Model.IndShockConsumerType.time_vary_)\n self.time_inv = deepcopy(Model.IndShockConsumerType.time_inv_)\n self.solveOnePeriod = Model.solveConsIndShock\n self.update()\n\n def simulateCSTW(self):\n \"\"\"The simulation method for the no aggregate shocks version of the model. Initializes the agent type, simulates a history of state and control variables, and stores the wealth history in self.W_history and the annualized MPC history in self.kappa_history. Parameters ---------- none Returns ------- none\"\"\"\n self.initializeSim()\n self.simConsHistory()\n self.W_history = self.pHist * self.bHist / self.Rfree\n if Params.do_lifecycle:\n self.W_history = self.W_history * self.cohort_scale\n self.kappa_history = 1.0 - (1.0 - self.MPChist) ** 4\n\n def update(self):\n \"\"\"Update the income process, the assets grid, and the terminal solution. Parameters ---------- none Returns ------- none\"\"\"\n orig_flow = self.time_flow\n if self.cycles == 0:\n self.updateIncomeProcessAlt()\n else:\n self.updateIncomeProcess()\n self.updateAssetsGrid()\n self.updateSolutionTerminal()\n self.timeFwd()\n self.resetRNG()\n if self.cycles > 0:\n self.IncomeDstn = Model.applyFlatIncomeTax(self.IncomeDstn, tax_rate=self.tax_rate, T_retire=self.T_retire, unemployed_indices=range(0, (self.TranShkCount + 1) * self.PermShkCount, self.TranShkCount + 1))\n self.makeIncShkHist()\n if not orig_flow:\n self.timeRev()\n\n def updateIncomeProcessAlt(self):\n \"\"\"An alternative method for constructing the income process in the infinite horizon model, where the labor supply l_bar creates a small oddity. Parameters ---------- none Returns ------- none\"\"\"\n tax_rate = self.IncUnemp * self.UnempPrb / (self.l_bar * (1.0 - self.UnempPrb))\n TranShkDstn = deepcopy(approxMeanOneLognormal(self.TranShkCount, sigma=self.TranShkStd[0], tail_N=0))\n TranShkDstn[0] = np.insert(TranShkDstn[0] * (1.0 - self.UnempPrb), 0, self.UnempPrb)\n TranShkDstn[1] = np.insert(self.l_bar * TranShkDstn[1] * (1.0 - tax_rate), 0, self.IncUnemp)\n PermShkDstn = approxMeanOneLognormal(self.PermShkCount, sigma=self.PermShkStd[0], tail_N=0)\n self.IncomeDstn = [combineIndepDstns(PermShkDstn, TranShkDstn)]\n self.TranShkDstn = TranShkDstn\n self.PermShkDstn = PermShkDstn\n self.addToTimeVary('IncomeDstn')\n", "source": "the_stack_v2_python_sparse", "source_path": "cstwMPC/cstwMPCold.py", "source_repo": "jackiekazil/HARK", "split": "test", "star_events_count": 1} {"blob_id": "564b6d5ecdcd7a342fafb3de2ea380a7b21b3714", "bodies": ["mod_obj = self.pool.get('ir.model.data')\nact_obj = self.pool.get('ir.actions.act_window')\nproject = self.browse(cr, uid, ids[0], context)\nview_context = {'search_default_account_id': [project.analytic_account_id.id], 'default_account_id': project.analytic_account_id.id, 'default_is_timesheet': True}\nhelp = _('

Record your timesheets for the project \\'%s\\'.

') % (project.name,)\nres = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')\nid = res and res[1] or False\nresult = act_obj.read(cr, uid, [id], context=context)[0]\nresult['name'] = _('Timesheets')\nresult['context'] = view_context\nresult['help'] = help\nreturn result", "res = self.pool['ir.actions.act_window'].for_xml_id(cr, uid, 'project_timesheet', 'action_project_analytic_account', context=context)\ncontract_ids = self.browse(cr, uid, ids, context=context)\naccount_ids = [x.analytic_account_id.id for x in contract_ids]\nres['res_id'] = account_ids and account_ids[0] or None\nreturn res"], "bodies_text": "<|body_start_0|>\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n project = self.browse(cr, uid, ids[0], context)\n view_context = {'search_default_account_id': [project.analytic_account_id.id], 'default_account_id': project.analytic_account_id.id, 'default_is_timesheet': True}\n help = _('

Record your timesheets for the project \\'%s\\'.

') % (project.name,)\n res = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')\n id = res and res[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n result['name'] = _('Timesheets')\n result['context'] = view_context\n result['help'] = help\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n res = self.pool['ir.actions.act_window'].for_xml_id(cr, uid, 'project_timesheet', 'action_project_analytic_account', context=context)\n contract_ids = self.browse(cr, uid, ids, context=context)\n account_ids = [x.analytic_account_id.id for x in contract_ids]\n res['res_id'] = account_ids and account_ids[0] or None\n return res\n<|end_body_1|>\n", "class_docstring": "", "class_name": "project_project", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass project_project:\n\n def open_timesheets(self, cr, uid, ids, context=None):\n \"\"\"open Timesheets view\"\"\"\n <|body_0|>\n\n def open_contract(self, cr, uid, ids, context=None):\n \"\"\"open Contract view\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n project = self.browse(cr, uid, ids[0], context)\n view_context = {'search_default_account_id': [project.analytic_account_id.id], 'default_account_id': project.analytic_account_id.id, 'default_is_timesheet': True}\n help = _('

Record your timesheets for the project \\'%s\\'.

') % (project.name,)\n res = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')\n id = res and res[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n result['name'] = _('Timesheets')\n result['context'] = view_context\n result['help'] = help\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n res = self.pool['ir.actions.act_window'].for_xml_id(cr, uid, 'project_timesheet', 'action_project_analytic_account', context=context)\n contract_ids = self.browse(cr, uid, ids, context=context)\n account_ids = [x.analytic_account_id.id for x in contract_ids]\n res['res_id'] = account_ids and account_ids[0] or None\n return res\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000476", "length_bytes": 7471, "license_type": "no_license", "methods": [{"docstring": "open Timesheets view", "name": "open_timesheets", "signature": "def open_timesheets(self, cr, uid, ids, context=None)"}, {"docstring": "open Contract view", "name": "open_contract", "signature": "def open_contract(self, cr, uid, ids, context=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_036463", "prompt": "Implement the Python class `project_project` described below.\n\nClass description:\nImplement the project_project class.\n\nMethod signatures and docstrings:\n- def open_timesheets(self, cr, uid, ids, context=None): open Timesheets view\n- def open_contract(self, cr, uid, ids, context=None): open Contract view", "prompted_full_text": "Implement the Python class `project_project` described below.\n\nClass description:\nImplement the project_project class.\n\nMethod signatures and docstrings:\n- def open_timesheets(self, cr, uid, ids, context=None): open Timesheets view\n- def open_contract(self, cr, uid, ids, context=None): open Contract view\n\n<|skeleton|>\nclass project_project:\n\n def open_timesheets(self, cr, uid, ids, context=None):\n \"\"\"open Timesheets view\"\"\"\n <|body_0|>\n\n def open_contract(self, cr, uid, ids, context=None):\n \"\"\"open Contract view\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n project = self.browse(cr, uid, ids[0], context)\n view_context = {'search_default_account_id': [project.analytic_account_id.id], 'default_account_id': project.analytic_account_id.id, 'default_is_timesheet': True}\n help = _('

Record your timesheets for the project \\'%s\\'.

') % (project.name,)\n res = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')\n id = res and res[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n result['name'] = _('Timesheets')\n result['context'] = view_context\n result['help'] = help\n return result\n<|end_body_0|>\n\n<|body_start_1|>\n res = self.pool['ir.actions.act_window'].for_xml_id(cr, uid, 'project_timesheet', 'action_project_analytic_account', context=context)\n contract_ids = self.browse(cr, uid, ids, context=context)\n account_ids = [x.analytic_account_id.id for x in contract_ids]\n res['res_id'] = account_ids and account_ids[0] or None\n return res\n<|end_body_1|>\n", "revision_id": "5a4fd72991c846d5cb7c5082f6bdfef5b2bca572", "skeleton": "<|skeleton|>\nclass project_project:\n\n def open_timesheets(self, cr, uid, ids, context=None):\n \"\"\"open Timesheets view\"\"\"\n <|body_0|>\n\n def open_contract(self, cr, uid, ids, context=None):\n \"\"\"open Contract view\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class project_project:\n def open_timesheets(self, cr, uid, ids, context=None):\n \"\"\"open Timesheets view\"\"\"\n mod_obj = self.pool.get('ir.model.data')\n act_obj = self.pool.get('ir.actions.act_window')\n project = self.browse(cr, uid, ids[0], context)\n view_context = {'search_default_account_id': [project.analytic_account_id.id], 'default_account_id': project.analytic_account_id.id, 'default_is_timesheet': True}\n help = _('

Record your timesheets for the project \\'%s\\'.

') % (project.name,)\n res = mod_obj.get_object_reference(cr, uid, 'hr_timesheet', 'act_hr_timesheet_line_evry1_all_form')\n id = res and res[1] or False\n result = act_obj.read(cr, uid, [id], context=context)[0]\n result['name'] = _('Timesheets')\n result['context'] = view_context\n result['help'] = help\n return result\n\n def open_contract(self, cr, uid, ids, context=None):\n \"\"\"open Contract view\"\"\"\n res = self.pool['ir.actions.act_window'].for_xml_id(cr, uid, 'project_timesheet', 'action_project_analytic_account', context=context)\n contract_ids = self.browse(cr, uid, ids, context=context)\n account_ids = [x.analytic_account_id.id for x in contract_ids]\n res['res_id'] = account_ids and account_ids[0] or None\n return res\n", "source": "the_stack_v2_python_sparse", "source_path": "yuancloud/addons/project_timesheet/project_timesheet.py", "source_repo": "cash2one/yuancloud", "split": "test", "star_events_count": 0} {"blob_id": "f19c4c637771d24e88293aa9e1654986779bfca3", "bodies": ["send = GlobalsatHandler.translateConfigOptions(self, send, options)\nif 'Ri' in options:\n send['freq_mov'] = options['Ri']\nif 'Ra' in options:\n send['freq_idle'] = options['Ra']\nif 'Ro' in options:\n send['send_mov'] = options['Ro']\nif 'S8' in options:\n send['send_by_angle'] = options['S8']\nreturn send", "packet = GlobalsatHandler.translate(self, data)\nsensor = packet['sensors'] or {}\nfor char in data:\n value = data[char]\n if value == '':\n value = '0'\n if char == 'a':\n sensor['ain0'] = float(value)\n if char == 'c':\n sensor['gsm_signal_strength'] = float(value)\n if char == 'n':\n intBatteryLevel = self.formatBatteryLevel(value)\n extBatteryVoltage = self.formatExtBatteryVoltage(value)\n sensor['int_battery_level'] = intBatteryLevel\n sensor['ext_battery_voltage'] = extBatteryVoltage\npacket['sensors'] = sensor\nreturn packet", "command = GlobalsatHandler.addCommandSetOptions(self, data)\nfor item in data:\n val = str(item['value'])\n if item['option'] == 'freq_mov':\n command += ',Ri=' + val\n elif item['option'] == 'freq_idle':\n command += ',Ra=' + val\n elif item['option'] == 'send_mov':\n command += ',Ro=' + val\n elif item['option'] == 'send_by_angle':\n command += ',S8=' + val\nreturn command"], "bodies_text": "<|body_start_0|>\n send = GlobalsatHandler.translateConfigOptions(self, send, options)\n if 'Ri' in options:\n send['freq_mov'] = options['Ri']\n if 'Ra' in options:\n send['freq_idle'] = options['Ra']\n if 'Ro' in options:\n send['send_mov'] = options['Ro']\n if 'S8' in options:\n send['send_by_angle'] = options['S8']\n return send\n<|end_body_0|>\n\n<|body_start_1|>\n packet = GlobalsatHandler.translate(self, data)\n sensor = packet['sensors'] or {}\n for char in data:\n value = data[char]\n if value == '':\n value = '0'\n if char == 'a':\n sensor['ain0'] = float(value)\n if char == 'c':\n sensor['gsm_signal_strength'] = float(value)\n if char == 'n':\n intBatteryLevel = self.formatBatteryLevel(value)\n extBatteryVoltage = self.formatExtBatteryVoltage(value)\n sensor['int_battery_level'] = intBatteryLevel\n sensor['ext_battery_voltage'] = extBatteryVoltage\n packet['sensors'] = sensor\n return packet\n<|end_body_1|>\n\n<|body_start_2|>\n command = GlobalsatHandler.addCommandSetOptions(self, data)\n for item in data:\n val = str(item['value'])\n if item['option'] == 'freq_mov':\n command += ',Ri=' + val\n elif item['option'] == 'freq_idle':\n command += ',Ra=' + val\n elif item['option'] == 'send_mov':\n command += ',Ro=' + val\n elif item['option'] == 'send_by_angle':\n command += ',S8=' + val\n return command\n<|end_body_2|>\n", "class_docstring": "Globalsat. GTR-128/GTR-129", "class_name": "Handler", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Handler:\n \"\"\"Globalsat. GTR-128/GTR-129\"\"\"\n\n def translateConfigOptions(self, send, options):\n \"\"\"Translate gps-tracker parsed options to observer format @param send: {string[]} data to send @param options: {string[]} parsed options\"\"\"\n <|body_0|>\n\n def translate(self, data):\n \"\"\"Translate gps-tracker data to observer pipe format @param data: dict() data from gps-tracker\"\"\"\n <|body_1|>\n\n def addCommandSetOptions(self, data):\n \"\"\"Add device options @param data: data dict()\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n send = GlobalsatHandler.translateConfigOptions(self, send, options)\n if 'Ri' in options:\n send['freq_mov'] = options['Ri']\n if 'Ra' in options:\n send['freq_idle'] = options['Ra']\n if 'Ro' in options:\n send['send_mov'] = options['Ro']\n if 'S8' in options:\n send['send_by_angle'] = options['S8']\n return send\n<|end_body_0|>\n\n<|body_start_1|>\n packet = GlobalsatHandler.translate(self, data)\n sensor = packet['sensors'] or {}\n for char in data:\n value = data[char]\n if value == '':\n value = '0'\n if char == 'a':\n sensor['ain0'] = float(value)\n if char == 'c':\n sensor['gsm_signal_strength'] = float(value)\n if char == 'n':\n intBatteryLevel = self.formatBatteryLevel(value)\n extBatteryVoltage = self.formatExtBatteryVoltage(value)\n sensor['int_battery_level'] = intBatteryLevel\n sensor['ext_battery_voltage'] = extBatteryVoltage\n packet['sensors'] = sensor\n return packet\n<|end_body_1|>\n\n<|body_start_2|>\n command = GlobalsatHandler.addCommandSetOptions(self, data)\n for item in data:\n val = str(item['value'])\n if item['option'] == 'freq_mov':\n command += ',Ri=' + val\n elif item['option'] == 'freq_idle':\n command += ',Ra=' + val\n elif item['option'] == 'send_mov':\n command += ',Ro=' + val\n elif item['option'] == 'send_by_angle':\n command += ',S8=' + val\n return command\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000477", "length_bytes": 4416, "license_type": "no_license", "methods": [{"docstring": "Translate gps-tracker parsed options to observer format @param send: {string[]} data to send @param options: {string[]} parsed options", "name": "translateConfigOptions", "signature": "def translateConfigOptions(self, send, options)"}, {"docstring": "Translate gps-tracker data to observer pipe format @param data: dict() data from gps-tracker", "name": "translate", "signature": "def translate(self, data)"}, {"docstring": "Add device options @param data: data dict()", "name": "addCommandSetOptions", "signature": "def addCommandSetOptions(self, data)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_test_001072", "prompt": "Implement the Python class `Handler` described below.\n\nClass description:\nGlobalsat. GTR-128/GTR-129\n\nMethod signatures and docstrings:\n- def translateConfigOptions(self, send, options): Translate gps-tracker parsed options to observer format @param send: {string[]} data to send @param options: {string[]} parsed options\n- def translate(self, data): Translate gps-tracker data to observer pipe format @param data: dict() data from gps-tracker\n- def addCommandSetOptions(self, data): Add device options @param data: data dict()", "prompted_full_text": "Implement the Python class `Handler` described below.\n\nClass description:\nGlobalsat. GTR-128/GTR-129\n\nMethod signatures and docstrings:\n- def translateConfigOptions(self, send, options): Translate gps-tracker parsed options to observer format @param send: {string[]} data to send @param options: {string[]} parsed options\n- def translate(self, data): Translate gps-tracker data to observer pipe format @param data: dict() data from gps-tracker\n- def addCommandSetOptions(self, data): Add device options @param data: data dict()\n\n<|skeleton|>\nclass Handler:\n \"\"\"Globalsat. GTR-128/GTR-129\"\"\"\n\n def translateConfigOptions(self, send, options):\n \"\"\"Translate gps-tracker parsed options to observer format @param send: {string[]} data to send @param options: {string[]} parsed options\"\"\"\n <|body_0|>\n\n def translate(self, data):\n \"\"\"Translate gps-tracker data to observer pipe format @param data: dict() data from gps-tracker\"\"\"\n <|body_1|>\n\n def addCommandSetOptions(self, data):\n \"\"\"Add device options @param data: data dict()\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n send = GlobalsatHandler.translateConfigOptions(self, send, options)\n if 'Ri' in options:\n send['freq_mov'] = options['Ri']\n if 'Ra' in options:\n send['freq_idle'] = options['Ra']\n if 'Ro' in options:\n send['send_mov'] = options['Ro']\n if 'S8' in options:\n send['send_by_angle'] = options['S8']\n return send\n<|end_body_0|>\n\n<|body_start_1|>\n packet = GlobalsatHandler.translate(self, data)\n sensor = packet['sensors'] or {}\n for char in data:\n value = data[char]\n if value == '':\n value = '0'\n if char == 'a':\n sensor['ain0'] = float(value)\n if char == 'c':\n sensor['gsm_signal_strength'] = float(value)\n if char == 'n':\n intBatteryLevel = self.formatBatteryLevel(value)\n extBatteryVoltage = self.formatExtBatteryVoltage(value)\n sensor['int_battery_level'] = intBatteryLevel\n sensor['ext_battery_voltage'] = extBatteryVoltage\n packet['sensors'] = sensor\n return packet\n<|end_body_1|>\n\n<|body_start_2|>\n command = GlobalsatHandler.addCommandSetOptions(self, data)\n for item in data:\n val = str(item['value'])\n if item['option'] == 'freq_mov':\n command += ',Ri=' + val\n elif item['option'] == 'freq_idle':\n command += ',Ra=' + val\n elif item['option'] == 'send_mov':\n command += ',Ro=' + val\n elif item['option'] == 'send_by_angle':\n command += ',S8=' + val\n return command\n<|end_body_2|>\n", "revision_id": "4a4bc730252ece695b2773388812e2d59d4947ce", "skeleton": "<|skeleton|>\nclass Handler:\n \"\"\"Globalsat. GTR-128/GTR-129\"\"\"\n\n def translateConfigOptions(self, send, options):\n \"\"\"Translate gps-tracker parsed options to observer format @param send: {string[]} data to send @param options: {string[]} parsed options\"\"\"\n <|body_0|>\n\n def translate(self, data):\n \"\"\"Translate gps-tracker data to observer pipe format @param data: dict() data from gps-tracker\"\"\"\n <|body_1|>\n\n def addCommandSetOptions(self, data):\n \"\"\"Add device options @param data: data dict()\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Handler:\n \"\"\"Globalsat. GTR-128/GTR-129\"\"\"\n\n def translateConfigOptions(self, send, options):\n \"\"\"Translate gps-tracker parsed options to observer format @param send: {string[]} data to send @param options: {string[]} parsed options\"\"\"\n send = GlobalsatHandler.translateConfigOptions(self, send, options)\n if 'Ri' in options:\n send['freq_mov'] = options['Ri']\n if 'Ra' in options:\n send['freq_idle'] = options['Ra']\n if 'Ro' in options:\n send['send_mov'] = options['Ro']\n if 'S8' in options:\n send['send_by_angle'] = options['S8']\n return send\n\n def translate(self, data):\n \"\"\"Translate gps-tracker data to observer pipe format @param data: dict() data from gps-tracker\"\"\"\n packet = GlobalsatHandler.translate(self, data)\n sensor = packet['sensors'] or {}\n for char in data:\n value = data[char]\n if value == '':\n value = '0'\n if char == 'a':\n sensor['ain0'] = float(value)\n if char == 'c':\n sensor['gsm_signal_strength'] = float(value)\n if char == 'n':\n intBatteryLevel = self.formatBatteryLevel(value)\n extBatteryVoltage = self.formatExtBatteryVoltage(value)\n sensor['int_battery_level'] = intBatteryLevel\n sensor['ext_battery_voltage'] = extBatteryVoltage\n packet['sensors'] = sensor\n return packet\n\n def addCommandSetOptions(self, data):\n \"\"\"Add device options @param data: data dict()\"\"\"\n command = GlobalsatHandler.addCommandSetOptions(self, data)\n for item in data:\n val = str(item['value'])\n if item['option'] == 'freq_mov':\n command += ',Ri=' + val\n elif item['option'] == 'freq_idle':\n command += ',Ra=' + val\n elif item['option'] == 'send_mov':\n command += ',Ro=' + val\n elif item['option'] == 'send_by_angle':\n command += ',S8=' + val\n return command\n", "source": "the_stack_v2_python_sparse", "source_path": "lib/handlers/globalsat/gtr128.py", "source_repo": "maprox/pipe", "split": "test", "star_events_count": 4} {"blob_id": "90afd1dab76ae9ed479d493650428792fe2b0dea", "bodies": ["self.handler = handler\nself.parser = ElementTree.XMLParser(target=ParserTarget(handler))\nself.lock = threading.RLock()\nself.in_use = False\nself._started = False", "with self.lock:\n if self.in_use:\n raise StreamParseError('StreamReader.feed() is not reentrant!')\n self.in_use = True\n try:\n if not self._started:\n if len(data) > 1:\n self.parser.feed(data[:1])\n data = data[1:]\n self._started = True\n if data:\n self.parser.feed(data)\n else:\n self.parser.close()\n except ElementTree.ParseError as err:\n self.handler.stream_parse_error(str(err))\n finally:\n self.in_use = False"], "bodies_text": "<|body_start_0|>\n self.handler = handler\n self.parser = ElementTree.XMLParser(target=ParserTarget(handler))\n self.lock = threading.RLock()\n self.in_use = False\n self._started = False\n<|end_body_0|>\n\n<|body_start_1|>\n with self.lock:\n if self.in_use:\n raise StreamParseError('StreamReader.feed() is not reentrant!')\n self.in_use = True\n try:\n if not self._started:\n if len(data) > 1:\n self.parser.feed(data[:1])\n data = data[1:]\n self._started = True\n if data:\n self.parser.feed(data)\n else:\n self.parser.close()\n except ElementTree.ParseError as err:\n self.handler.stream_parse_error(str(err))\n finally:\n self.in_use = False\n<|end_body_1|>\n", "class_docstring": "XML stream reader. :Ivariables: - `handler`: object to receive parsed stream elements - `parser`: the xml parser - `lock`: lock to protect the object - `in_use`: re-entrancy protection - `_started`: flag set after the first byte is pushed to the parser :Types: - `handler`: `XMLStreamHandler` - `parser`: :etree:`ElementTree.XMLParser` - `lock`: :std:`threading.RLock` - `in_use`: `bool` - `_started`: `bool`", "class_name": "StreamReader", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass StreamReader:\n \"\"\"XML stream reader. :Ivariables: - `handler`: object to receive parsed stream elements - `parser`: the xml parser - `lock`: lock to protect the object - `in_use`: re-entrancy protection - `_started`: flag set after the first byte is pushed to the parser :Types: - `handler`: `XMLStreamHandler` - `parser`: :etree:`ElementTree.XMLParser` - `lock`: :std:`threading.RLock` - `in_use`: `bool` - `_started`: `bool`\"\"\"\n\n def __init__(self, handler):\n \"\"\"Initialize the reader. :Parameters: - `handler`: Object to handle stream start, end and stanzas. :Types: - `handler`: `XMLStreamHandler`\"\"\"\n <|body_0|>\n\n def feed(self, data):\n \"\"\"Feed the parser with a chunk of data. Apropriate methods of `handler` will be called whenever something interesting is found. :Parameters: - `data`: the chunk of data to parse. :Types: - `data`: `str`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.handler = handler\n self.parser = ElementTree.XMLParser(target=ParserTarget(handler))\n self.lock = threading.RLock()\n self.in_use = False\n self._started = False\n<|end_body_0|>\n\n<|body_start_1|>\n with self.lock:\n if self.in_use:\n raise StreamParseError('StreamReader.feed() is not reentrant!')\n self.in_use = True\n try:\n if not self._started:\n if len(data) > 1:\n self.parser.feed(data[:1])\n data = data[1:]\n self._started = True\n if data:\n self.parser.feed(data)\n else:\n self.parser.close()\n except ElementTree.ParseError as err:\n self.handler.stream_parse_error(str(err))\n finally:\n self.in_use = False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000478", "length_bytes": 7257, "license_type": "permissive", "methods": [{"docstring": "Initialize the reader. :Parameters: - `handler`: Object to handle stream start, end and stanzas. :Types: - `handler`: `XMLStreamHandler`", "name": "__init__", "signature": "def __init__(self, handler)"}, {"docstring": "Feed the parser with a chunk of data. Apropriate methods of `handler` will be called whenever something interesting is found. :Parameters: - `data`: the chunk of data to parse. :Types: - `data`: `str`", "name": "feed", "signature": "def feed(self, data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028893", "prompt": "Implement the Python class `StreamReader` described below.\n\nClass description:\nXML stream reader. :Ivariables: - `handler`: object to receive parsed stream elements - `parser`: the xml parser - `lock`: lock to protect the object - `in_use`: re-entrancy protection - `_started`: flag set after the first byte is pushed to the parser :Types: - `handler`: `XMLStreamHandler` - `parser`: :etree:`ElementTree.XMLParser` - `lock`: :std:`threading.RLock` - `in_use`: `bool` - `_started`: `bool`\n\nMethod signatures and docstrings:\n- def __init__(self, handler): Initialize the reader. :Parameters: - `handler`: Object to handle stream start, end and stanzas. :Types: - `handler`: `XMLStreamHandler`\n- def feed(self, data): Feed the parser with a chunk of data. Apropriate methods of `handler` will be called whenever something interesting is found. :Parameters: - `data`: the chunk of data to parse. :Types: - `data`: `str`", "prompted_full_text": "Implement the Python class `StreamReader` described below.\n\nClass description:\nXML stream reader. :Ivariables: - `handler`: object to receive parsed stream elements - `parser`: the xml parser - `lock`: lock to protect the object - `in_use`: re-entrancy protection - `_started`: flag set after the first byte is pushed to the parser :Types: - `handler`: `XMLStreamHandler` - `parser`: :etree:`ElementTree.XMLParser` - `lock`: :std:`threading.RLock` - `in_use`: `bool` - `_started`: `bool`\n\nMethod signatures and docstrings:\n- def __init__(self, handler): Initialize the reader. :Parameters: - `handler`: Object to handle stream start, end and stanzas. :Types: - `handler`: `XMLStreamHandler`\n- def feed(self, data): Feed the parser with a chunk of data. Apropriate methods of `handler` will be called whenever something interesting is found. :Parameters: - `data`: the chunk of data to parse. :Types: - `data`: `str`\n\n<|skeleton|>\nclass StreamReader:\n \"\"\"XML stream reader. :Ivariables: - `handler`: object to receive parsed stream elements - `parser`: the xml parser - `lock`: lock to protect the object - `in_use`: re-entrancy protection - `_started`: flag set after the first byte is pushed to the parser :Types: - `handler`: `XMLStreamHandler` - `parser`: :etree:`ElementTree.XMLParser` - `lock`: :std:`threading.RLock` - `in_use`: `bool` - `_started`: `bool`\"\"\"\n\n def __init__(self, handler):\n \"\"\"Initialize the reader. :Parameters: - `handler`: Object to handle stream start, end and stanzas. :Types: - `handler`: `XMLStreamHandler`\"\"\"\n <|body_0|>\n\n def feed(self, data):\n \"\"\"Feed the parser with a chunk of data. Apropriate methods of `handler` will be called whenever something interesting is found. :Parameters: - `data`: the chunk of data to parse. :Types: - `data`: `str`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.handler = handler\n self.parser = ElementTree.XMLParser(target=ParserTarget(handler))\n self.lock = threading.RLock()\n self.in_use = False\n self._started = False\n<|end_body_0|>\n\n<|body_start_1|>\n with self.lock:\n if self.in_use:\n raise StreamParseError('StreamReader.feed() is not reentrant!')\n self.in_use = True\n try:\n if not self._started:\n if len(data) > 1:\n self.parser.feed(data[:1])\n data = data[1:]\n self._started = True\n if data:\n self.parser.feed(data)\n else:\n self.parser.close()\n except ElementTree.ParseError as err:\n self.handler.stream_parse_error(str(err))\n finally:\n self.in_use = False\n<|end_body_1|>\n", "revision_id": "26402a08fc46b09ef94e8d7a6bbc3a54ff9d0891", "skeleton": "<|skeleton|>\nclass StreamReader:\n \"\"\"XML stream reader. :Ivariables: - `handler`: object to receive parsed stream elements - `parser`: the xml parser - `lock`: lock to protect the object - `in_use`: re-entrancy protection - `_started`: flag set after the first byte is pushed to the parser :Types: - `handler`: `XMLStreamHandler` - `parser`: :etree:`ElementTree.XMLParser` - `lock`: :std:`threading.RLock` - `in_use`: `bool` - `_started`: `bool`\"\"\"\n\n def __init__(self, handler):\n \"\"\"Initialize the reader. :Parameters: - `handler`: Object to handle stream start, end and stanzas. :Types: - `handler`: `XMLStreamHandler`\"\"\"\n <|body_0|>\n\n def feed(self, data):\n \"\"\"Feed the parser with a chunk of data. Apropriate methods of `handler` will be called whenever something interesting is found. :Parameters: - `data`: the chunk of data to parse. :Types: - `data`: `str`\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class StreamReader:\n \"\"\"XML stream reader. :Ivariables: - `handler`: object to receive parsed stream elements - `parser`: the xml parser - `lock`: lock to protect the object - `in_use`: re-entrancy protection - `_started`: flag set after the first byte is pushed to the parser :Types: - `handler`: `XMLStreamHandler` - `parser`: :etree:`ElementTree.XMLParser` - `lock`: :std:`threading.RLock` - `in_use`: `bool` - `_started`: `bool`\"\"\"\n\n def __init__(self, handler):\n \"\"\"Initialize the reader. :Parameters: - `handler`: Object to handle stream start, end and stanzas. :Types: - `handler`: `XMLStreamHandler`\"\"\"\n self.handler = handler\n self.parser = ElementTree.XMLParser(target=ParserTarget(handler))\n self.lock = threading.RLock()\n self.in_use = False\n self._started = False\n\n def feed(self, data):\n \"\"\"Feed the parser with a chunk of data. Apropriate methods of `handler` will be called whenever something interesting is found. :Parameters: - `data`: the chunk of data to parse. :Types: - `data`: `str`\"\"\"\n with self.lock:\n if self.in_use:\n raise StreamParseError('StreamReader.feed() is not reentrant!')\n self.in_use = True\n try:\n if not self._started:\n if len(data) > 1:\n self.parser.feed(data[:1])\n data = data[1:]\n self._started = True\n if data:\n self.parser.feed(data)\n else:\n self.parser.close()\n except ElementTree.ParseError as err:\n self.handler.stream_parse_error(str(err))\n finally:\n self.in_use = False\n", "source": "the_stack_v2_python_sparse", "source_path": "python3-alpha/python-libs/pyxmpp2/xmppparser.py", "source_repo": "kuri65536/python-for-android", "split": "test", "star_events_count": 280} {"blob_id": "e69a253017953ed1d8f8f62ad2bb5d04f8c3b6e5", "bodies": ["if rest_filter is None:\n rest_filter = dict()\nrest_filter['genepanel_name', 'genepanel_version'] = [(gp.name, gp.version) for gp in user.group.genepanels]\nreturn self.list_query(session, annotationjob.AnnotationJob, schemas.AnnotationJobSchema(), rest_filter=rest_filter, order_by=annotationjob.AnnotationJob.date_submitted.desc(), page=page, per_page=per_page)", "data['user_id'] = user.id\nannotation_job_data = annotationjob.AnnotationJob(**data)\nsession.add(annotation_job_data)\nsession.commit()\nreturn schemas.AnnotationJobSchema().dump(annotation_job_data).data"], "bodies_text": "<|body_start_0|>\n if rest_filter is None:\n rest_filter = dict()\n rest_filter['genepanel_name', 'genepanel_version'] = [(gp.name, gp.version) for gp in user.group.genepanels]\n return self.list_query(session, annotationjob.AnnotationJob, schemas.AnnotationJobSchema(), rest_filter=rest_filter, order_by=annotationjob.AnnotationJob.date_submitted.desc(), page=page, per_page=per_page)\n<|end_body_0|>\n\n<|body_start_1|>\n data['user_id'] = user.id\n annotation_job_data = annotationjob.AnnotationJob(**data)\n session.add(annotation_job_data)\n session.commit()\n return schemas.AnnotationJobSchema().dump(annotation_job_data).data\n<|end_body_1|>\n", "class_docstring": "", "class_name": "AnnotationJobList", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass AnnotationJobList:\n\n def get(self, session, rest_filter=None, page=None, per_page=None, user=None):\n \"\"\"Lists annotation jobs in the system. --- summary: List annotation jobs tags: - Import\"\"\"\n <|body_0|>\n\n def post(self, session, data=None, user=None):\n \"\"\"Creates an annotation job in the system. --- summary: Create annotation job tags: - Import\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if rest_filter is None:\n rest_filter = dict()\n rest_filter['genepanel_name', 'genepanel_version'] = [(gp.name, gp.version) for gp in user.group.genepanels]\n return self.list_query(session, annotationjob.AnnotationJob, schemas.AnnotationJobSchema(), rest_filter=rest_filter, order_by=annotationjob.AnnotationJob.date_submitted.desc(), page=page, per_page=per_page)\n<|end_body_0|>\n\n<|body_start_1|>\n data['user_id'] = user.id\n annotation_job_data = annotationjob.AnnotationJob(**data)\n session.add(annotation_job_data)\n session.commit()\n return schemas.AnnotationJobSchema().dump(annotation_job_data).data\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000479", "length_bytes": 3720, "license_type": "permissive", "methods": [{"docstring": "Lists annotation jobs in the system. --- summary: List annotation jobs tags: - Import", "name": "get", "signature": "def get(self, session, rest_filter=None, page=None, per_page=None, user=None)"}, {"docstring": "Creates an annotation job in the system. --- summary: Create annotation job tags: - Import", "name": "post", "signature": "def post(self, session, data=None, user=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_val_000223", "prompt": "Implement the Python class `AnnotationJobList` described below.\n\nClass description:\nImplement the AnnotationJobList class.\n\nMethod signatures and docstrings:\n- def get(self, session, rest_filter=None, page=None, per_page=None, user=None): Lists annotation jobs in the system. --- summary: List annotation jobs tags: - Import\n- def post(self, session, data=None, user=None): Creates an annotation job in the system. --- summary: Create annotation job tags: - Import", "prompted_full_text": "Implement the Python class `AnnotationJobList` described below.\n\nClass description:\nImplement the AnnotationJobList class.\n\nMethod signatures and docstrings:\n- def get(self, session, rest_filter=None, page=None, per_page=None, user=None): Lists annotation jobs in the system. --- summary: List annotation jobs tags: - Import\n- def post(self, session, data=None, user=None): Creates an annotation job in the system. --- summary: Create annotation job tags: - Import\n\n<|skeleton|>\nclass AnnotationJobList:\n\n def get(self, session, rest_filter=None, page=None, per_page=None, user=None):\n \"\"\"Lists annotation jobs in the system. --- summary: List annotation jobs tags: - Import\"\"\"\n <|body_0|>\n\n def post(self, session, data=None, user=None):\n \"\"\"Creates an annotation job in the system. --- summary: Create annotation job tags: - Import\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if rest_filter is None:\n rest_filter = dict()\n rest_filter['genepanel_name', 'genepanel_version'] = [(gp.name, gp.version) for gp in user.group.genepanels]\n return self.list_query(session, annotationjob.AnnotationJob, schemas.AnnotationJobSchema(), rest_filter=rest_filter, order_by=annotationjob.AnnotationJob.date_submitted.desc(), page=page, per_page=per_page)\n<|end_body_0|>\n\n<|body_start_1|>\n data['user_id'] = user.id\n annotation_job_data = annotationjob.AnnotationJob(**data)\n session.add(annotation_job_data)\n session.commit()\n return schemas.AnnotationJobSchema().dump(annotation_job_data).data\n<|end_body_1|>\n", "revision_id": "e38631d302611a143c9baaa684bcbd014d9734e4", "skeleton": "<|skeleton|>\nclass AnnotationJobList:\n\n def get(self, session, rest_filter=None, page=None, per_page=None, user=None):\n \"\"\"Lists annotation jobs in the system. --- summary: List annotation jobs tags: - Import\"\"\"\n <|body_0|>\n\n def post(self, session, data=None, user=None):\n \"\"\"Creates an annotation job in the system. --- summary: Create annotation job tags: - Import\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class AnnotationJobList:\n def get(self, session, rest_filter=None, page=None, per_page=None, user=None):\n \"\"\"Lists annotation jobs in the system. --- summary: List annotation jobs tags: - Import\"\"\"\n if rest_filter is None:\n rest_filter = dict()\n rest_filter['genepanel_name', 'genepanel_version'] = [(gp.name, gp.version) for gp in user.group.genepanels]\n return self.list_query(session, annotationjob.AnnotationJob, schemas.AnnotationJobSchema(), rest_filter=rest_filter, order_by=annotationjob.AnnotationJob.date_submitted.desc(), page=page, per_page=per_page)\n\n def post(self, session, data=None, user=None):\n \"\"\"Creates an annotation job in the system. --- summary: Create annotation job tags: - Import\"\"\"\n data['user_id'] = user.id\n annotation_job_data = annotationjob.AnnotationJob(**data)\n session.add(annotation_job_data)\n session.commit()\n return schemas.AnnotationJobSchema().dump(annotation_job_data).data\n", "source": "the_stack_v2_python_sparse", "source_path": "src/api/v1/resources/annotationjob.py", "source_repo": "dabble-of-devops-consulting/ella", "split": "test", "star_events_count": 0} {"blob_id": "7d635a82ce03d258c8202e98a81536edadae6452", "bodies": ["self.startDate = startDate\nself.endDate = endDate\nself.aulDBdir = aulDBdir\nself.aulDBName = aulDBName\nself.aulTabName = aulTabName\nself.alSSCutoff = alSSCutoff\nself.aeSSCutoff = aeSSCutoff\nself.minDelT = minDelT\nself.minDiffTime = minDiffTime\nself.aulDF = self._load_aul_data()", "conn = sqlite3.connect(self.aulDBdir + self.aulDBName, detect_types=sqlite3.PARSE_DECLTYPES)\ncommand = 'SELECT * FROM {tb} ' + \"WHERE datetime BETWEEN '{stm}' and '{etm}'\"\ncommand = command.format(tb=self.aulTabName, stm=self.startDate, etm=self.endDate)\nreturn pandas.read_sql(command, conn)", "self.aulDF = self.aulDF[(self.aulDF['al'] >= self.alSSCutoff) & (self.aulDF['ae'] <= self.aeSSCutoff)].reset_index(drop=True)\nself.aulDF['delT'] = self.aulDF['datetime'].diff()\nself.aulDF['delT'] = self.aulDF['delT'].apply(lambda x: x.total_seconds() / 60.0)\nbrkInds = self.aulDF[self.aulDF['delT'] > self.minDelT].index.to_frame().diff().reset_index()\nbrkInds.columns = ['inds', 'diffs']\nshftdRows = brkInds['inds'].shift(1)\nbrkInds['prevRowInds'] = shftdRows\nnonSSDtList = []\nfor row in brkInds[brkInds['diffs'] > self.minDiffTime].iterrows():\n dd = self.aulDF.iloc[int(row[1]['prevRowInds']):int(row[1]['inds'] - 1)]['datetime']\n nonSSDtList.append((dd.min(), dd.max()))\nreturn nonSSDtList"], "bodies_text": "<|body_start_0|>\n self.startDate = startDate\n self.endDate = endDate\n self.aulDBdir = aulDBdir\n self.aulDBName = aulDBName\n self.aulTabName = aulTabName\n self.alSSCutoff = alSSCutoff\n self.aeSSCutoff = aeSSCutoff\n self.minDelT = minDelT\n self.minDiffTime = minDiffTime\n self.aulDF = self._load_aul_data()\n<|end_body_0|>\n\n<|body_start_1|>\n conn = sqlite3.connect(self.aulDBdir + self.aulDBName, detect_types=sqlite3.PARSE_DECLTYPES)\n command = 'SELECT * FROM {tb} ' + \"WHERE datetime BETWEEN '{stm}' and '{etm}'\"\n command = command.format(tb=self.aulTabName, stm=self.startDate, etm=self.endDate)\n return pandas.read_sql(command, conn)\n<|end_body_1|>\n\n<|body_start_2|>\n self.aulDF = self.aulDF[(self.aulDF['al'] >= self.alSSCutoff) & (self.aulDF['ae'] <= self.aeSSCutoff)].reset_index(drop=True)\n self.aulDF['delT'] = self.aulDF['datetime'].diff()\n self.aulDF['delT'] = self.aulDF['delT'].apply(lambda x: x.total_seconds() / 60.0)\n brkInds = self.aulDF[self.aulDF['delT'] > self.minDelT].index.to_frame().diff().reset_index()\n brkInds.columns = ['inds', 'diffs']\n shftdRows = brkInds['inds'].shift(1)\n brkInds['prevRowInds'] = shftdRows\n nonSSDtList = []\n for row in brkInds[brkInds['diffs'] > self.minDiffTime].iterrows():\n dd = self.aulDF.iloc[int(row[1]['prevRowInds']):int(row[1]['inds'] - 1)]['datetime']\n nonSSDtList.append((dd.min(), dd.max()))\n return nonSSDtList\n<|end_body_2|>\n", "class_docstring": "Load the required data into a DF", "class_name": "NonSSData", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass NonSSData:\n \"\"\"Load the required data into a DF\"\"\"\n\n def __init__(self, startDate, endDate, aulDBdir, aulDBName, aulTabName, alSSCutoff=-25, aeSSCutoff=50, minDelT=5, minDiffTime=180):\n \"\"\"setup some vars alSSCutoff and aeSSCutoff are cutoffs for al and ae to identify non-ss periods. minDelT is the minimum time gap where al/ae can exceed cutoffs. minDiffTime is the continuous set of time range which is considered a non-substorm period. In other words, if non-SS conditions are satisfied for this interval of time (say 3 hours), then we consider this period to be a non-ss period.\"\"\"\n <|body_0|>\n\n def _load_aul_data(self):\n \"\"\"Load AUL data\"\"\"\n <|body_1|>\n\n def get_non_ss_dates(self):\n \"\"\"Get a list of dates where non-ss intervals were identified!\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.startDate = startDate\n self.endDate = endDate\n self.aulDBdir = aulDBdir\n self.aulDBName = aulDBName\n self.aulTabName = aulTabName\n self.alSSCutoff = alSSCutoff\n self.aeSSCutoff = aeSSCutoff\n self.minDelT = minDelT\n self.minDiffTime = minDiffTime\n self.aulDF = self._load_aul_data()\n<|end_body_0|>\n\n<|body_start_1|>\n conn = sqlite3.connect(self.aulDBdir + self.aulDBName, detect_types=sqlite3.PARSE_DECLTYPES)\n command = 'SELECT * FROM {tb} ' + \"WHERE datetime BETWEEN '{stm}' and '{etm}'\"\n command = command.format(tb=self.aulTabName, stm=self.startDate, etm=self.endDate)\n return pandas.read_sql(command, conn)\n<|end_body_1|>\n\n<|body_start_2|>\n self.aulDF = self.aulDF[(self.aulDF['al'] >= self.alSSCutoff) & (self.aulDF['ae'] <= self.aeSSCutoff)].reset_index(drop=True)\n self.aulDF['delT'] = self.aulDF['datetime'].diff()\n self.aulDF['delT'] = self.aulDF['delT'].apply(lambda x: x.total_seconds() / 60.0)\n brkInds = self.aulDF[self.aulDF['delT'] > self.minDelT].index.to_frame().diff().reset_index()\n brkInds.columns = ['inds', 'diffs']\n shftdRows = brkInds['inds'].shift(1)\n brkInds['prevRowInds'] = shftdRows\n nonSSDtList = []\n for row in brkInds[brkInds['diffs'] > self.minDiffTime].iterrows():\n dd = self.aulDF.iloc[int(row[1]['prevRowInds']):int(row[1]['inds'] - 1)]['datetime']\n nonSSDtList.append((dd.min(), dd.max()))\n return nonSSDtList\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000480", "length_bytes": 3150, "license_type": "no_license", "methods": [{"docstring": "setup some vars alSSCutoff and aeSSCutoff are cutoffs for al and ae to identify non-ss periods. minDelT is the minimum time gap where al/ae can exceed cutoffs. minDiffTime is the continuous set of time range which is considered a non-substorm period. In other words, if non-SS conditions are satisfied for this interval of time (say 3 hours), then we consider this period to be a non-ss period.", "name": "__init__", "signature": "def __init__(self, startDate, endDate, aulDBdir, aulDBName, aulTabName, alSSCutoff=-25, aeSSCutoff=50, minDelT=5, minDiffTime=180)"}, {"docstring": "Load AUL data", "name": "_load_aul_data", "signature": "def _load_aul_data(self)"}, {"docstring": "Get a list of dates where non-ss intervals were identified!", "name": "get_non_ss_dates", "signature": "def get_non_ss_dates(self)"}], "n_methods": 3, "prompt": "Implement the Python class `NonSSData` described below.\n\nClass description:\nLoad the required data into a DF\n\nMethod signatures and docstrings:\n- def __init__(self, startDate, endDate, aulDBdir, aulDBName, aulTabName, alSSCutoff=-25, aeSSCutoff=50, minDelT=5, minDiffTime=180): setup some vars alSSCutoff and aeSSCutoff are cutoffs for al and ae to identify non-ss periods. minDelT is the minimum time gap where al/ae can exceed cutoffs. minDiffTime is the continuous set of time range which is considered a non-substorm period. In other words, if non-SS conditions are satisfied for this interval of time (say 3 hours), then we consider this period to be a non-ss period.\n- def _load_aul_data(self): Load AUL data\n- def get_non_ss_dates(self): Get a list of dates where non-ss intervals were identified!", "prompted_full_text": "Implement the Python class `NonSSData` described below.\n\nClass description:\nLoad the required data into a DF\n\nMethod signatures and docstrings:\n- def __init__(self, startDate, endDate, aulDBdir, aulDBName, aulTabName, alSSCutoff=-25, aeSSCutoff=50, minDelT=5, minDiffTime=180): setup some vars alSSCutoff and aeSSCutoff are cutoffs for al and ae to identify non-ss periods. minDelT is the minimum time gap where al/ae can exceed cutoffs. minDiffTime is the continuous set of time range which is considered a non-substorm period. In other words, if non-SS conditions are satisfied for this interval of time (say 3 hours), then we consider this period to be a non-ss period.\n- def _load_aul_data(self): Load AUL data\n- def get_non_ss_dates(self): Get a list of dates where non-ss intervals were identified!\n\n<|skeleton|>\nclass NonSSData:\n \"\"\"Load the required data into a DF\"\"\"\n\n def __init__(self, startDate, endDate, aulDBdir, aulDBName, aulTabName, alSSCutoff=-25, aeSSCutoff=50, minDelT=5, minDiffTime=180):\n \"\"\"setup some vars alSSCutoff and aeSSCutoff are cutoffs for al and ae to identify non-ss periods. minDelT is the minimum time gap where al/ae can exceed cutoffs. minDiffTime is the continuous set of time range which is considered a non-substorm period. In other words, if non-SS conditions are satisfied for this interval of time (say 3 hours), then we consider this period to be a non-ss period.\"\"\"\n <|body_0|>\n\n def _load_aul_data(self):\n \"\"\"Load AUL data\"\"\"\n <|body_1|>\n\n def get_non_ss_dates(self):\n \"\"\"Get a list of dates where non-ss intervals were identified!\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.startDate = startDate\n self.endDate = endDate\n self.aulDBdir = aulDBdir\n self.aulDBName = aulDBName\n self.aulTabName = aulTabName\n self.alSSCutoff = alSSCutoff\n self.aeSSCutoff = aeSSCutoff\n self.minDelT = minDelT\n self.minDiffTime = minDiffTime\n self.aulDF = self._load_aul_data()\n<|end_body_0|>\n\n<|body_start_1|>\n conn = sqlite3.connect(self.aulDBdir + self.aulDBName, detect_types=sqlite3.PARSE_DECLTYPES)\n command = 'SELECT * FROM {tb} ' + \"WHERE datetime BETWEEN '{stm}' and '{etm}'\"\n command = command.format(tb=self.aulTabName, stm=self.startDate, etm=self.endDate)\n return pandas.read_sql(command, conn)\n<|end_body_1|>\n\n<|body_start_2|>\n self.aulDF = self.aulDF[(self.aulDF['al'] >= self.alSSCutoff) & (self.aulDF['ae'] <= self.aeSSCutoff)].reset_index(drop=True)\n self.aulDF['delT'] = self.aulDF['datetime'].diff()\n self.aulDF['delT'] = self.aulDF['delT'].apply(lambda x: x.total_seconds() / 60.0)\n brkInds = self.aulDF[self.aulDF['delT'] > self.minDelT].index.to_frame().diff().reset_index()\n brkInds.columns = ['inds', 'diffs']\n shftdRows = brkInds['inds'].shift(1)\n brkInds['prevRowInds'] = shftdRows\n nonSSDtList = []\n for row in brkInds[brkInds['diffs'] > self.minDiffTime].iterrows():\n dd = self.aulDF.iloc[int(row[1]['prevRowInds']):int(row[1]['inds'] - 1)]['datetime']\n nonSSDtList.append((dd.min(), dd.max()))\n return nonSSDtList\n<|end_body_2|>\n", "revision_id": "83b3441c0b2c2af4fae38bca8a0db281bb0ddce9", "skeleton": "<|skeleton|>\nclass NonSSData:\n \"\"\"Load the required data into a DF\"\"\"\n\n def __init__(self, startDate, endDate, aulDBdir, aulDBName, aulTabName, alSSCutoff=-25, aeSSCutoff=50, minDelT=5, minDiffTime=180):\n \"\"\"setup some vars alSSCutoff and aeSSCutoff are cutoffs for al and ae to identify non-ss periods. minDelT is the minimum time gap where al/ae can exceed cutoffs. minDiffTime is the continuous set of time range which is considered a non-substorm period. In other words, if non-SS conditions are satisfied for this interval of time (say 3 hours), then we consider this period to be a non-ss period.\"\"\"\n <|body_0|>\n\n def _load_aul_data(self):\n \"\"\"Load AUL data\"\"\"\n <|body_1|>\n\n def get_non_ss_dates(self):\n \"\"\"Get a list of dates where non-ss intervals were identified!\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class NonSSData:\n \"\"\"Load the required data into a DF\"\"\"\n\n def __init__(self, startDate, endDate, aulDBdir, aulDBName, aulTabName, alSSCutoff=-25, aeSSCutoff=50, minDelT=5, minDiffTime=180):\n \"\"\"setup some vars alSSCutoff and aeSSCutoff are cutoffs for al and ae to identify non-ss periods. minDelT is the minimum time gap where al/ae can exceed cutoffs. minDiffTime is the continuous set of time range which is considered a non-substorm period. In other words, if non-SS conditions are satisfied for this interval of time (say 3 hours), then we consider this period to be a non-ss period.\"\"\"\n self.startDate = startDate\n self.endDate = endDate\n self.aulDBdir = aulDBdir\n self.aulDBName = aulDBName\n self.aulTabName = aulTabName\n self.alSSCutoff = alSSCutoff\n self.aeSSCutoff = aeSSCutoff\n self.minDelT = minDelT\n self.minDiffTime = minDiffTime\n self.aulDF = self._load_aul_data()\n\n def _load_aul_data(self):\n \"\"\"Load AUL data\"\"\"\n conn = sqlite3.connect(self.aulDBdir + self.aulDBName, detect_types=sqlite3.PARSE_DECLTYPES)\n command = 'SELECT * FROM {tb} ' + \"WHERE datetime BETWEEN '{stm}' and '{etm}'\"\n command = command.format(tb=self.aulTabName, stm=self.startDate, etm=self.endDate)\n return pandas.read_sql(command, conn)\n\n def get_non_ss_dates(self):\n \"\"\"Get a list of dates where non-ss intervals were identified!\"\"\"\n self.aulDF = self.aulDF[(self.aulDF['al'] >= self.alSSCutoff) & (self.aulDF['ae'] <= self.aeSSCutoff)].reset_index(drop=True)\n self.aulDF['delT'] = self.aulDF['datetime'].diff()\n self.aulDF['delT'] = self.aulDF['delT'].apply(lambda x: x.total_seconds() / 60.0)\n brkInds = self.aulDF[self.aulDF['delT'] > self.minDelT].index.to_frame().diff().reset_index()\n brkInds.columns = ['inds', 'diffs']\n shftdRows = brkInds['inds'].shift(1)\n brkInds['prevRowInds'] = shftdRows\n nonSSDtList = []\n for row in brkInds[brkInds['diffs'] > self.minDiffTime].iterrows():\n dd = self.aulDF.iloc[int(row[1]['prevRowInds']):int(row[1]['inds'] - 1)]['datetime']\n nonSSDtList.append((dd.min(), dd.max()))\n return nonSSDtList\n", "source": "the_stack_v2_python_sparse", "source_path": "data_pipeline/non_ss_dataset.py", "source_repo": "MuhammadVT/sson_pred", "split": "test", "star_events_count": 2} {"blob_id": "dd7e00413d68a10694d6948d05a646c9fcb96217", "bodies": ["pins = pins or PINS\nself.touch = []\nfor gpio in pins:\n self.touch.append(Button(gpio))", "for item in self.touch:\n if item.is_pressed:\n return True\nreturn False"], "bodies_text": "<|body_start_0|>\n pins = pins or PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n<|end_body_0|>\n\n<|body_start_1|>\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n<|end_body_1|>\n", "class_docstring": "Sensor class", "class_name": "Sensor", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pins = pins or PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n<|end_body_0|>\n\n<|body_start_1|>\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000481", "length_bytes": 1508, "license_type": "permissive", "methods": [{"docstring": "Constructor", "name": "__init__", "signature": "def __init__(self, pins=None)"}, {"docstring": "Check sensor state", "name": "check_sensor", "signature": "def check_sensor(self)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_026062", "prompt": "Implement the Python class `Sensor` described below.\n\nClass description:\nSensor class\n\nMethod signatures and docstrings:\n- def __init__(self, pins=None): Constructor\n- def check_sensor(self): Check sensor state", "prompted_full_text": "Implement the Python class `Sensor` described below.\n\nClass description:\nSensor class\n\nMethod signatures and docstrings:\n- def __init__(self, pins=None): Constructor\n- def check_sensor(self): Check sensor state\n\n<|skeleton|>\nclass Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n pins = pins or PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n<|end_body_0|>\n\n<|body_start_1|>\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n<|end_body_1|>\n", "revision_id": "cfba2860145978904d1dd427f2326efeccfc561a", "skeleton": "<|skeleton|>\nclass Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n <|body_0|>\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Sensor:\n \"\"\"Sensor class\"\"\"\n\n def __init__(self, pins=None):\n \"\"\"Constructor\"\"\"\n pins = pins or PINS\n self.touch = []\n for gpio in pins:\n self.touch.append(Button(gpio))\n\n def check_sensor(self):\n \"\"\"Check sensor state\"\"\"\n for item in self.touch:\n if item.is_pressed:\n return True\n return False\n", "source": "the_stack_v2_python_sparse", "source_path": "chapter_12/avoidance.py", "source_repo": "packtjaniceg/Raspberry-Pi-4-Cookbook-for-Python-Programmers-Fourth-Edition", "split": "test", "star_events_count": 0} {"blob_id": "4f9930ba09e5fa49e635ef3d698a646772f4c33f", "bodies": ["from scitbx.array_family import flex\nself._mask = mask.as_1d().as_int()\nself._mask.reshape(flex.grid(mask.all()))\nself._gain = None\nself._background = None\nself._kernel_size = kernel_size\nself._count = 1", "from scitbx.array_family import flex\nimport numpy\nmask = self._mask.deep_copy()\ngain_map, mask, mean = self._compute_gain_map(image, mask)\nmask = self._remove_strong_pixels(gain_map, mask)\ngain_map = gain_map.as_numpy_array()\nindex = numpy.where(mask.as_numpy_array() == 0)\ngain_map[index] = 1.0\ngain_map = flex.double(gain_map)\nif self._gain is None:\n self._gain = gain_map\nelse:\n self._gain += gain_map\nif self._background is None:\n self._background = mean\nelse:\n self._background += mean\nself._count += 1", "from dials.algorithms.image.filter import index_of_dispersion_filter\nimage = image.as_double()\nfilteralg = index_of_dispersion_filter(image, mask, self._kernel_size, 0)\nfiltered = filteralg.index_of_dispersion()\nmask = filteralg.mask()\nmean = filteralg.mean()\nreturn (filtered, mask, mean)", "from scitbx.array_family import flex\nfrom operator import mul\nfrom math import sqrt\nimport numpy\nn = reduce(mul, self._kernel_size)\nbound = 1.0 + 3.0 * (2.0 / sqrt(n - 1))\nindex = numpy.where(gain_map.as_numpy_array() > bound)\nmask = mask.as_numpy_array()\nmask[index] = 0\nmask = flex.int(mask)\nreturn mask", "from dials.algorithms.image.filter import mean_filter\nself._gain /= self._count\nself._gain *= self._mask.as_double()\nreturn mean_filter(self._gain, self._mask, self._kernel_size, 0)", "from dials.algorithms.image.filter import mean_filter\nself._background /= self._count\nself._gain *= self._mask.as_double()\nreturn mean_filter(self._background, self._mask, self._kernel_size, 0)"], "bodies_text": "<|body_start_0|>\n from scitbx.array_family import flex\n self._mask = mask.as_1d().as_int()\n self._mask.reshape(flex.grid(mask.all()))\n self._gain = None\n self._background = None\n self._kernel_size = kernel_size\n self._count = 1\n<|end_body_0|>\n\n<|body_start_1|>\n from scitbx.array_family import flex\n import numpy\n mask = self._mask.deep_copy()\n gain_map, mask, mean = self._compute_gain_map(image, mask)\n mask = self._remove_strong_pixels(gain_map, mask)\n gain_map = gain_map.as_numpy_array()\n index = numpy.where(mask.as_numpy_array() == 0)\n gain_map[index] = 1.0\n gain_map = flex.double(gain_map)\n if self._gain is None:\n self._gain = gain_map\n else:\n self._gain += gain_map\n if self._background is None:\n self._background = mean\n else:\n self._background += mean\n self._count += 1\n<|end_body_1|>\n\n<|body_start_2|>\n from dials.algorithms.image.filter import index_of_dispersion_filter\n image = image.as_double()\n filteralg = index_of_dispersion_filter(image, mask, self._kernel_size, 0)\n filtered = filteralg.index_of_dispersion()\n mask = filteralg.mask()\n mean = filteralg.mean()\n return (filtered, mask, mean)\n<|end_body_2|>\n\n<|body_start_3|>\n from scitbx.array_family import flex\n from operator import mul\n from math import sqrt\n import numpy\n n = reduce(mul, self._kernel_size)\n bound = 1.0 + 3.0 * (2.0 / sqrt(n - 1))\n index = numpy.where(gain_map.as_numpy_array() > bound)\n mask = mask.as_numpy_array()\n mask[index] = 0\n mask = flex.int(mask)\n return mask\n<|end_body_3|>\n\n<|body_start_4|>\n from dials.algorithms.image.filter import mean_filter\n self._gain /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._gain, self._mask, self._kernel_size, 0)\n<|end_body_4|>\n\n<|body_start_5|>\n from dials.algorithms.image.filter import mean_filter\n self._background /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._background, self._mask, self._kernel_size, 0)\n<|end_body_5|>\n", "class_docstring": "Class to calculate the gain map of a detector.", "class_name": "ComputeBackgroundAndGain", "detected_licenses": ["BSD-3-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ComputeBackgroundAndGain:\n \"\"\"Class to calculate the gain map of a detector.\"\"\"\n\n def __init__(self, mask, kernel_size=(3, 3)):\n \"\"\"Initialise the class with a mask of trusted regions\"\"\"\n <|body_0|>\n\n def add(self, image):\n \"\"\"Add an image to calculation.\"\"\"\n <|body_1|>\n\n def _compute_gain_map(self, image, mask):\n \"\"\"Complete the gain map of a single image.\"\"\"\n <|body_2|>\n\n def _remove_strong_pixels(self, gain_map, mask):\n \"\"\"Remove strong pixels from the gain map.\"\"\"\n <|body_3|>\n\n def gain(self):\n \"\"\"Compute the full gain map.\"\"\"\n <|body_4|>\n\n def background(self):\n \"\"\"Compute the full background map.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from scitbx.array_family import flex\n self._mask = mask.as_1d().as_int()\n self._mask.reshape(flex.grid(mask.all()))\n self._gain = None\n self._background = None\n self._kernel_size = kernel_size\n self._count = 1\n<|end_body_0|>\n\n<|body_start_1|>\n from scitbx.array_family import flex\n import numpy\n mask = self._mask.deep_copy()\n gain_map, mask, mean = self._compute_gain_map(image, mask)\n mask = self._remove_strong_pixels(gain_map, mask)\n gain_map = gain_map.as_numpy_array()\n index = numpy.where(mask.as_numpy_array() == 0)\n gain_map[index] = 1.0\n gain_map = flex.double(gain_map)\n if self._gain is None:\n self._gain = gain_map\n else:\n self._gain += gain_map\n if self._background is None:\n self._background = mean\n else:\n self._background += mean\n self._count += 1\n<|end_body_1|>\n\n<|body_start_2|>\n from dials.algorithms.image.filter import index_of_dispersion_filter\n image = image.as_double()\n filteralg = index_of_dispersion_filter(image, mask, self._kernel_size, 0)\n filtered = filteralg.index_of_dispersion()\n mask = filteralg.mask()\n mean = filteralg.mean()\n return (filtered, mask, mean)\n<|end_body_2|>\n\n<|body_start_3|>\n from scitbx.array_family import flex\n from operator import mul\n from math import sqrt\n import numpy\n n = reduce(mul, self._kernel_size)\n bound = 1.0 + 3.0 * (2.0 / sqrt(n - 1))\n index = numpy.where(gain_map.as_numpy_array() > bound)\n mask = mask.as_numpy_array()\n mask[index] = 0\n mask = flex.int(mask)\n return mask\n<|end_body_3|>\n\n<|body_start_4|>\n from dials.algorithms.image.filter import mean_filter\n self._gain /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._gain, self._mask, self._kernel_size, 0)\n<|end_body_4|>\n\n<|body_start_5|>\n from dials.algorithms.image.filter import mean_filter\n self._background /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._background, self._mask, self._kernel_size, 0)\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000482", "length_bytes": 3407, "license_type": "permissive", "methods": [{"docstring": "Initialise the class with a mask of trusted regions", "name": "__init__", "signature": "def __init__(self, mask, kernel_size=(3, 3))"}, {"docstring": "Add an image to calculation.", "name": "add", "signature": "def add(self, image)"}, {"docstring": "Complete the gain map of a single image.", "name": "_compute_gain_map", "signature": "def _compute_gain_map(self, image, mask)"}, {"docstring": "Remove strong pixels from the gain map.", "name": "_remove_strong_pixels", "signature": "def _remove_strong_pixels(self, gain_map, mask)"}, {"docstring": "Compute the full gain map.", "name": "gain", "signature": "def gain(self)"}, {"docstring": "Compute the full background map.", "name": "background", "signature": "def background(self)"}], "n_methods": 6, "prompt": "Implement the Python class `ComputeBackgroundAndGain` described below.\n\nClass description:\nClass to calculate the gain map of a detector.\n\nMethod signatures and docstrings:\n- def __init__(self, mask, kernel_size=(3, 3)): Initialise the class with a mask of trusted regions\n- def add(self, image): Add an image to calculation.\n- def _compute_gain_map(self, image, mask): Complete the gain map of a single image.\n- def _remove_strong_pixels(self, gain_map, mask): Remove strong pixels from the gain map.\n- def gain(self): Compute the full gain map.\n- def background(self): Compute the full background map.", "prompted_full_text": "Implement the Python class `ComputeBackgroundAndGain` described below.\n\nClass description:\nClass to calculate the gain map of a detector.\n\nMethod signatures and docstrings:\n- def __init__(self, mask, kernel_size=(3, 3)): Initialise the class with a mask of trusted regions\n- def add(self, image): Add an image to calculation.\n- def _compute_gain_map(self, image, mask): Complete the gain map of a single image.\n- def _remove_strong_pixels(self, gain_map, mask): Remove strong pixels from the gain map.\n- def gain(self): Compute the full gain map.\n- def background(self): Compute the full background map.\n\n<|skeleton|>\nclass ComputeBackgroundAndGain:\n \"\"\"Class to calculate the gain map of a detector.\"\"\"\n\n def __init__(self, mask, kernel_size=(3, 3)):\n \"\"\"Initialise the class with a mask of trusted regions\"\"\"\n <|body_0|>\n\n def add(self, image):\n \"\"\"Add an image to calculation.\"\"\"\n <|body_1|>\n\n def _compute_gain_map(self, image, mask):\n \"\"\"Complete the gain map of a single image.\"\"\"\n <|body_2|>\n\n def _remove_strong_pixels(self, gain_map, mask):\n \"\"\"Remove strong pixels from the gain map.\"\"\"\n <|body_3|>\n\n def gain(self):\n \"\"\"Compute the full gain map.\"\"\"\n <|body_4|>\n\n def background(self):\n \"\"\"Compute the full background map.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n from scitbx.array_family import flex\n self._mask = mask.as_1d().as_int()\n self._mask.reshape(flex.grid(mask.all()))\n self._gain = None\n self._background = None\n self._kernel_size = kernel_size\n self._count = 1\n<|end_body_0|>\n\n<|body_start_1|>\n from scitbx.array_family import flex\n import numpy\n mask = self._mask.deep_copy()\n gain_map, mask, mean = self._compute_gain_map(image, mask)\n mask = self._remove_strong_pixels(gain_map, mask)\n gain_map = gain_map.as_numpy_array()\n index = numpy.where(mask.as_numpy_array() == 0)\n gain_map[index] = 1.0\n gain_map = flex.double(gain_map)\n if self._gain is None:\n self._gain = gain_map\n else:\n self._gain += gain_map\n if self._background is None:\n self._background = mean\n else:\n self._background += mean\n self._count += 1\n<|end_body_1|>\n\n<|body_start_2|>\n from dials.algorithms.image.filter import index_of_dispersion_filter\n image = image.as_double()\n filteralg = index_of_dispersion_filter(image, mask, self._kernel_size, 0)\n filtered = filteralg.index_of_dispersion()\n mask = filteralg.mask()\n mean = filteralg.mean()\n return (filtered, mask, mean)\n<|end_body_2|>\n\n<|body_start_3|>\n from scitbx.array_family import flex\n from operator import mul\n from math import sqrt\n import numpy\n n = reduce(mul, self._kernel_size)\n bound = 1.0 + 3.0 * (2.0 / sqrt(n - 1))\n index = numpy.where(gain_map.as_numpy_array() > bound)\n mask = mask.as_numpy_array()\n mask[index] = 0\n mask = flex.int(mask)\n return mask\n<|end_body_3|>\n\n<|body_start_4|>\n from dials.algorithms.image.filter import mean_filter\n self._gain /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._gain, self._mask, self._kernel_size, 0)\n<|end_body_4|>\n\n<|body_start_5|>\n from dials.algorithms.image.filter import mean_filter\n self._background /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._background, self._mask, self._kernel_size, 0)\n<|end_body_5|>\n", "revision_id": "fb9672b91854f564cbbba6f1cceeefa18d135965", "skeleton": "<|skeleton|>\nclass ComputeBackgroundAndGain:\n \"\"\"Class to calculate the gain map of a detector.\"\"\"\n\n def __init__(self, mask, kernel_size=(3, 3)):\n \"\"\"Initialise the class with a mask of trusted regions\"\"\"\n <|body_0|>\n\n def add(self, image):\n \"\"\"Add an image to calculation.\"\"\"\n <|body_1|>\n\n def _compute_gain_map(self, image, mask):\n \"\"\"Complete the gain map of a single image.\"\"\"\n <|body_2|>\n\n def _remove_strong_pixels(self, gain_map, mask):\n \"\"\"Remove strong pixels from the gain map.\"\"\"\n <|body_3|>\n\n def gain(self):\n \"\"\"Compute the full gain map.\"\"\"\n <|body_4|>\n\n def background(self):\n \"\"\"Compute the full background map.\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ComputeBackgroundAndGain:\n \"\"\"Class to calculate the gain map of a detector.\"\"\"\n\n def __init__(self, mask, kernel_size=(3, 3)):\n \"\"\"Initialise the class with a mask of trusted regions\"\"\"\n from scitbx.array_family import flex\n self._mask = mask.as_1d().as_int()\n self._mask.reshape(flex.grid(mask.all()))\n self._gain = None\n self._background = None\n self._kernel_size = kernel_size\n self._count = 1\n\n def add(self, image):\n \"\"\"Add an image to calculation.\"\"\"\n from scitbx.array_family import flex\n import numpy\n mask = self._mask.deep_copy()\n gain_map, mask, mean = self._compute_gain_map(image, mask)\n mask = self._remove_strong_pixels(gain_map, mask)\n gain_map = gain_map.as_numpy_array()\n index = numpy.where(mask.as_numpy_array() == 0)\n gain_map[index] = 1.0\n gain_map = flex.double(gain_map)\n if self._gain is None:\n self._gain = gain_map\n else:\n self._gain += gain_map\n if self._background is None:\n self._background = mean\n else:\n self._background += mean\n self._count += 1\n\n def _compute_gain_map(self, image, mask):\n \"\"\"Complete the gain map of a single image.\"\"\"\n from dials.algorithms.image.filter import index_of_dispersion_filter\n image = image.as_double()\n filteralg = index_of_dispersion_filter(image, mask, self._kernel_size, 0)\n filtered = filteralg.index_of_dispersion()\n mask = filteralg.mask()\n mean = filteralg.mean()\n return (filtered, mask, mean)\n\n def _remove_strong_pixels(self, gain_map, mask):\n \"\"\"Remove strong pixels from the gain map.\"\"\"\n from scitbx.array_family import flex\n from operator import mul\n from math import sqrt\n import numpy\n n = reduce(mul, self._kernel_size)\n bound = 1.0 + 3.0 * (2.0 / sqrt(n - 1))\n index = numpy.where(gain_map.as_numpy_array() > bound)\n mask = mask.as_numpy_array()\n mask[index] = 0\n mask = flex.int(mask)\n return mask\n\n def gain(self):\n \"\"\"Compute the full gain map.\"\"\"\n from dials.algorithms.image.filter import mean_filter\n self._gain /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._gain, self._mask, self._kernel_size, 0)\n\n def background(self):\n \"\"\"Compute the full background map.\"\"\"\n from dials.algorithms.image.filter import mean_filter\n self._background /= self._count\n self._gain *= self._mask.as_double()\n return mean_filter(self._background, self._mask, self._kernel_size, 0)\n", "source": "the_stack_v2_python_sparse", "source_path": "algorithms/background_lookup/background_and_gain.py", "source_repo": "jbeilstenedmands/dials", "split": "test", "star_events_count": 0} {"blob_id": "cf0c3cc2469e73713689eb926d630c5644478781", "bodies": ["self.positional_encoding = SinePositionalEncoding(**self.positional_encoding)\nself.encoder = DetrTransformerEncoder(**self.encoder)\nself.decoder = ConditionalDetrTransformerDecoder(**self.decoder)\nself.embed_dims = self.encoder.embed_dims\nself.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\nnum_feats = self.positional_encoding.num_feats\nassert num_feats * 2 == self.embed_dims, f'embed_dims should be exactly 2 times of num_feats. Found {self.embed_dims} and {num_feats}.'", "hidden_states, references = self.decoder(query=query, key=memory, query_pos=query_pos, key_pos=memory_pos, key_padding_mask=memory_mask)\nhead_inputs_dict = dict(hidden_states=hidden_states, references=references)\nreturn head_inputs_dict"], "bodies_text": "<|body_start_0|>\n self.positional_encoding = SinePositionalEncoding(**self.positional_encoding)\n self.encoder = DetrTransformerEncoder(**self.encoder)\n self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)\n self.embed_dims = self.encoder.embed_dims\n self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\n num_feats = self.positional_encoding.num_feats\n assert num_feats * 2 == self.embed_dims, f'embed_dims should be exactly 2 times of num_feats. Found {self.embed_dims} and {num_feats}.'\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_states, references = self.decoder(query=query, key=memory, query_pos=query_pos, key_pos=memory_pos, key_padding_mask=memory_mask)\n head_inputs_dict = dict(hidden_states=hidden_states, references=references)\n return head_inputs_dict\n<|end_body_1|>\n", "class_docstring": "Implementation of `Conditional DETR for Fast Training Convergence. `_. Code is modified from the `official github repo `_.", "class_name": "ConditionalDETR", "detected_licenses": ["Apache-2.0", "BSD-3-Clause", "MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ConditionalDETR:\n \"\"\"Implementation of `Conditional DETR for Fast Training Convergence. `_. Code is modified from the `official github repo `_.\"\"\"\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n <|body_0|>\n\n def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, memory_mask: Tensor, memory_pos: Tensor) -> Dict:\n \"\"\"Forward with Transformer decoder. Args: query (Tensor): The queries of decoder inputs, has shape (bs, num_queries, dim). query_pos (Tensor): The positional queries of decoder inputs, has shape (bs, num_queries, dim). memory (Tensor): The output embeddings of the Transformer encoder, has shape (bs, num_feat_points, dim). memory_mask (Tensor): ByteTensor, the padding mask of the memory, has shape (bs, num_feat_points). memory_pos (Tensor): The positional embeddings of memory, has shape (bs, num_feat_points, dim). Returns: dict: The dictionary of decoder outputs, which includes the `hidden_states` and `references` of the decoder output. - hidden_states (Tensor): Has shape (num_decoder_layers, b\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.positional_encoding = SinePositionalEncoding(**self.positional_encoding)\n self.encoder = DetrTransformerEncoder(**self.encoder)\n self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)\n self.embed_dims = self.encoder.embed_dims\n self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\n num_feats = self.positional_encoding.num_feats\n assert num_feats * 2 == self.embed_dims, f'embed_dims should be exactly 2 times of num_feats. Found {self.embed_dims} and {num_feats}.'\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_states, references = self.decoder(query=query, key=memory, query_pos=query_pos, key_pos=memory_pos, key_padding_mask=memory_mask)\n head_inputs_dict = dict(hidden_states=hidden_states, references=references)\n return head_inputs_dict\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000483", "length_bytes": 3029, "license_type": "permissive", "methods": [{"docstring": "Initialize layers except for backbone, neck and bbox_head.", "name": "_init_layers", "signature": "def _init_layers(self) -> None"}, {"docstring": "Forward with Transformer decoder. Args: query (Tensor): The queries of decoder inputs, has shape (bs, num_queries, dim). query_pos (Tensor): The positional queries of decoder inputs, has shape (bs, num_queries, dim). memory (Tensor): The output embeddings of the Transformer encoder, has shape (bs, num_feat_points, dim). memory_mask (Tensor): ByteTensor, the padding mask of the memory, has shape (bs, num_feat_points). memory_pos (Tensor): The positional embeddings of memory, has shape (bs, num_feat_points, dim). Returns: dict: The dictionary of decoder outputs, which includes the `hidden_states` and `references` of the decoder output. - hidden_states (Tensor): Has shape (num_decoder_layers, b", "name": "forward_decoder", "signature": "def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, memory_mask: Tensor, memory_pos: Tensor) -> Dict"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003478", "prompt": "Implement the Python class `ConditionalDETR` described below.\n\nClass description:\nImplementation of `Conditional DETR for Fast Training Convergence. `_. Code is modified from the `official github repo `_.\n\nMethod signatures and docstrings:\n- def _init_layers(self) -> None: Initialize layers except for backbone, neck and bbox_head.\n- def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, memory_mask: Tensor, memory_pos: Tensor) -> Dict: Forward with Transformer decoder. Args: query (Tensor): The queries of decoder inputs, has shape (bs, num_queries, dim). query_pos (Tensor): The positional queries of decoder inputs, has shape (bs, num_queries, dim). memory (Tensor): The output embeddings of the Transformer encoder, has shape (bs, num_feat_points, dim). memory_mask (Tensor): ByteTensor, the padding mask of the memory, has shape (bs, num_feat_points). memory_pos (Tensor): The positional embeddings of memory, has shape (bs, num_feat_points, dim). Returns: dict: The dictionary of decoder outputs, which includes the `hidden_states` and `references` of the decoder output. - hidden_states (Tensor): Has shape (num_decoder_layers, b", "prompted_full_text": "Implement the Python class `ConditionalDETR` described below.\n\nClass description:\nImplementation of `Conditional DETR for Fast Training Convergence. `_. Code is modified from the `official github repo `_.\n\nMethod signatures and docstrings:\n- def _init_layers(self) -> None: Initialize layers except for backbone, neck and bbox_head.\n- def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, memory_mask: Tensor, memory_pos: Tensor) -> Dict: Forward with Transformer decoder. Args: query (Tensor): The queries of decoder inputs, has shape (bs, num_queries, dim). query_pos (Tensor): The positional queries of decoder inputs, has shape (bs, num_queries, dim). memory (Tensor): The output embeddings of the Transformer encoder, has shape (bs, num_feat_points, dim). memory_mask (Tensor): ByteTensor, the padding mask of the memory, has shape (bs, num_feat_points). memory_pos (Tensor): The positional embeddings of memory, has shape (bs, num_feat_points, dim). Returns: dict: The dictionary of decoder outputs, which includes the `hidden_states` and `references` of the decoder output. - hidden_states (Tensor): Has shape (num_decoder_layers, b\n\n<|skeleton|>\nclass ConditionalDETR:\n \"\"\"Implementation of `Conditional DETR for Fast Training Convergence. `_. Code is modified from the `official github repo `_.\"\"\"\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n <|body_0|>\n\n def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, memory_mask: Tensor, memory_pos: Tensor) -> Dict:\n \"\"\"Forward with Transformer decoder. Args: query (Tensor): The queries of decoder inputs, has shape (bs, num_queries, dim). query_pos (Tensor): The positional queries of decoder inputs, has shape (bs, num_queries, dim). memory (Tensor): The output embeddings of the Transformer encoder, has shape (bs, num_feat_points, dim). memory_mask (Tensor): ByteTensor, the padding mask of the memory, has shape (bs, num_feat_points). memory_pos (Tensor): The positional embeddings of memory, has shape (bs, num_feat_points, dim). Returns: dict: The dictionary of decoder outputs, which includes the `hidden_states` and `references` of the decoder output. - hidden_states (Tensor): Has shape (num_decoder_layers, b\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.positional_encoding = SinePositionalEncoding(**self.positional_encoding)\n self.encoder = DetrTransformerEncoder(**self.encoder)\n self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)\n self.embed_dims = self.encoder.embed_dims\n self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\n num_feats = self.positional_encoding.num_feats\n assert num_feats * 2 == self.embed_dims, f'embed_dims should be exactly 2 times of num_feats. Found {self.embed_dims} and {num_feats}.'\n<|end_body_0|>\n\n<|body_start_1|>\n hidden_states, references = self.decoder(query=query, key=memory, query_pos=query_pos, key_pos=memory_pos, key_padding_mask=memory_mask)\n head_inputs_dict = dict(hidden_states=hidden_states, references=references)\n return head_inputs_dict\n<|end_body_1|>\n", "revision_id": "8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6", "skeleton": "<|skeleton|>\nclass ConditionalDETR:\n \"\"\"Implementation of `Conditional DETR for Fast Training Convergence. `_. Code is modified from the `official github repo `_.\"\"\"\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n <|body_0|>\n\n def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, memory_mask: Tensor, memory_pos: Tensor) -> Dict:\n \"\"\"Forward with Transformer decoder. Args: query (Tensor): The queries of decoder inputs, has shape (bs, num_queries, dim). query_pos (Tensor): The positional queries of decoder inputs, has shape (bs, num_queries, dim). memory (Tensor): The output embeddings of the Transformer encoder, has shape (bs, num_feat_points, dim). memory_mask (Tensor): ByteTensor, the padding mask of the memory, has shape (bs, num_feat_points). memory_pos (Tensor): The positional embeddings of memory, has shape (bs, num_feat_points, dim). Returns: dict: The dictionary of decoder outputs, which includes the `hidden_states` and `references` of the decoder output. - hidden_states (Tensor): Has shape (num_decoder_layers, b\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ConditionalDETR:\n \"\"\"Implementation of `Conditional DETR for Fast Training Convergence. `_. Code is modified from the `official github repo `_.\"\"\"\n\n def _init_layers(self) -> None:\n \"\"\"Initialize layers except for backbone, neck and bbox_head.\"\"\"\n self.positional_encoding = SinePositionalEncoding(**self.positional_encoding)\n self.encoder = DetrTransformerEncoder(**self.encoder)\n self.decoder = ConditionalDetrTransformerDecoder(**self.decoder)\n self.embed_dims = self.encoder.embed_dims\n self.query_embedding = nn.Embedding(self.num_queries, self.embed_dims)\n num_feats = self.positional_encoding.num_feats\n assert num_feats * 2 == self.embed_dims, f'embed_dims should be exactly 2 times of num_feats. Found {self.embed_dims} and {num_feats}.'\n\n def forward_decoder(self, query: Tensor, query_pos: Tensor, memory: Tensor, memory_mask: Tensor, memory_pos: Tensor) -> Dict:\n \"\"\"Forward with Transformer decoder. Args: query (Tensor): The queries of decoder inputs, has shape (bs, num_queries, dim). query_pos (Tensor): The positional queries of decoder inputs, has shape (bs, num_queries, dim). memory (Tensor): The output embeddings of the Transformer encoder, has shape (bs, num_feat_points, dim). memory_mask (Tensor): ByteTensor, the padding mask of the memory, has shape (bs, num_feat_points). memory_pos (Tensor): The positional embeddings of memory, has shape (bs, num_feat_points, dim). Returns: dict: The dictionary of decoder outputs, which includes the `hidden_states` and `references` of the decoder output. - hidden_states (Tensor): Has shape (num_decoder_layers, b\"\"\"\n hidden_states, references = self.decoder(query=query, key=memory, query_pos=query_pos, key_pos=memory_pos, key_padding_mask=memory_mask)\n head_inputs_dict = dict(hidden_states=hidden_states, references=references)\n return head_inputs_dict\n", "source": "the_stack_v2_python_sparse", "source_path": "ai/mmdetection/mmdet/models/detectors/conditional_detr.py", "source_repo": "alldatacenter/alldata", "split": "test", "star_events_count": 774} {"blob_id": "277761e49362c32fff676c0e2f3186d6451442f9", "bodies": ["self.result_text = result_text\nself.flash_output = flash_output\nself.status = self.STATUS_FAILED\nself.save()\nreturn True", "self.result_text = result_text\nself.flash_output = flash_output\nself.status = self.STATUS_FINISHED\nself.save()\nreturn True"], "bodies_text": "<|body_start_0|>\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FAILED\n self.save()\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FINISHED\n self.save()\n return True\n<|end_body_1|>\n", "class_docstring": "", "class_name": "FlashRequest", "detected_licenses": ["MIT", "BSD-3-Clause", "LicenseRef-scancode-other-permissive", "GPL-3.0-only", "PostgreSQL", "BSD-2-Clause"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass FlashRequest:\n\n def fail(self, result_text, flash_output=''):\n \"\"\"FlashRequest.fail is just a fast way to set the status & result text and save the object\"\"\"\n <|body_0|>\n\n def succeed(self, result_text, flash_output=''):\n \"\"\"FlashRequest.succeed is just a fast way to set the status & result text and save the object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FAILED\n self.save()\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FINISHED\n self.save()\n return True\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000484", "length_bytes": 20289, "license_type": "permissive", "methods": [{"docstring": "FlashRequest.fail is just a fast way to set the status & result text and save the object", "name": "fail", "signature": "def fail(self, result_text, flash_output='')"}, {"docstring": "FlashRequest.succeed is just a fast way to set the status & result text and save the object", "name": "succeed", "signature": "def succeed(self, result_text, flash_output='')"}], "n_methods": 2, "prompt": "Implement the Python class `FlashRequest` described below.\n\nClass description:\nImplement the FlashRequest class.\n\nMethod signatures and docstrings:\n- def fail(self, result_text, flash_output=''): FlashRequest.fail is just a fast way to set the status & result text and save the object\n- def succeed(self, result_text, flash_output=''): FlashRequest.succeed is just a fast way to set the status & result text and save the object", "prompted_full_text": "Implement the Python class `FlashRequest` described below.\n\nClass description:\nImplement the FlashRequest class.\n\nMethod signatures and docstrings:\n- def fail(self, result_text, flash_output=''): FlashRequest.fail is just a fast way to set the status & result text and save the object\n- def succeed(self, result_text, flash_output=''): FlashRequest.succeed is just a fast way to set the status & result text and save the object\n\n<|skeleton|>\nclass FlashRequest:\n\n def fail(self, result_text, flash_output=''):\n \"\"\"FlashRequest.fail is just a fast way to set the status & result text and save the object\"\"\"\n <|body_0|>\n\n def succeed(self, result_text, flash_output=''):\n \"\"\"FlashRequest.succeed is just a fast way to set the status & result text and save the object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FAILED\n self.save()\n return True\n<|end_body_0|>\n\n<|body_start_1|>\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FINISHED\n self.save()\n return True\n<|end_body_1|>\n", "revision_id": "1aea05b07b9445edce2f39480b03c93eced125e6", "skeleton": "<|skeleton|>\nclass FlashRequest:\n\n def fail(self, result_text, flash_output=''):\n \"\"\"FlashRequest.fail is just a fast way to set the status & result text and save the object\"\"\"\n <|body_0|>\n\n def succeed(self, result_text, flash_output=''):\n \"\"\"FlashRequest.succeed is just a fast way to set the status & result text and save the object\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class FlashRequest:\n def fail(self, result_text, flash_output=''):\n \"\"\"FlashRequest.fail is just a fast way to set the status & result text and save the object\"\"\"\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FAILED\n self.save()\n return True\n\n def succeed(self, result_text, flash_output=''):\n \"\"\"FlashRequest.succeed is just a fast way to set the status & result text and save the object\"\"\"\n self.result_text = result_text\n self.flash_output = flash_output\n self.status = self.STATUS_FINISHED\n self.save()\n return True\n", "source": "the_stack_v2_python_sparse", "source_path": "firmware_flash/models.py", "source_repo": "thorrak/fermentrack", "split": "test", "star_events_count": 126} {"blob_id": "59b4d77ef013c783f6e8ef623ee63587d06ed3ca", "bodies": ["assert isinstance(block_string, str)\nops = block_string.split('_')\noptions = {}\nfor op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\nassert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\nreturn StageArgs(num_repeat=int(options['r']), kernel_size=int(options['k']), stride=[int(options['s'][0])], expand_ratio=int(options['e']), input_filters=int(options['i']), output_filters=int(options['o']), se_ratio=float(options['se']) if 'se' in options else None, id_skip='noskip' not in block_string)", "args = ['r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters]\nif 0 < block.se_ratio <= 1:\n args.append('se%s' % block.se_ratio)\nif block.id_skip is False:\n args.append('noskip')\nreturn '_'.join(args)", "assert isinstance(string_list, list)\nblocks_args = []\nfor block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\nreturn blocks_args", "block_strings = []\nfor block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\nreturn block_strings"], "bodies_text": "<|body_start_0|>\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return StageArgs(num_repeat=int(options['r']), kernel_size=int(options['k']), stride=[int(options['s'][0])], expand_ratio=int(options['e']), input_filters=int(options['i']), output_filters=int(options['o']), se_ratio=float(options['se']) if 'se' in options else None, id_skip='noskip' not in block_string)\n<|end_body_0|>\n\n<|body_start_1|>\n args = ['r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters]\n if 0 < block.se_ratio <= 1:\n args.append('se%s' % block.se_ratio)\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n<|end_body_1|>\n\n<|body_start_2|>\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n<|end_body_2|>\n\n<|body_start_3|>\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n<|end_body_3|>\n", "class_docstring": "Block Decoder for readability, straight from the official TensorFlow repository.", "class_name": "BlockDecoder", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository.\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Get a block through a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. Returns: StageArgs: The namedtuple defined at the top of this file.\"\"\"\n <|body_0|>\n\n def _encode_block_string(block):\n \"\"\"Encode a block to a string. Args: block (namedtuple): A BlockArgs type argument. Returns: block_string: A String form of BlockArgs.\"\"\"\n <|body_1|>\n\n def decode(string_list):\n \"\"\"Decode a list of string notations to specify blocks inside the network. Args: string_list (list[str]): A list of strings, each string is a notation of block. Returns: blocks_args: A list of BlockArgs namedtuples of block args.\"\"\"\n <|body_2|>\n\n def encode(blocks_args):\n \"\"\"Encode a list of BlockArgs to a list of strings. Args: blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. Returns: block_strings: A list of strings, each string is a notation of block.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return StageArgs(num_repeat=int(options['r']), kernel_size=int(options['k']), stride=[int(options['s'][0])], expand_ratio=int(options['e']), input_filters=int(options['i']), output_filters=int(options['o']), se_ratio=float(options['se']) if 'se' in options else None, id_skip='noskip' not in block_string)\n<|end_body_0|>\n\n<|body_start_1|>\n args = ['r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters]\n if 0 < block.se_ratio <= 1:\n args.append('se%s' % block.se_ratio)\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n<|end_body_1|>\n\n<|body_start_2|>\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n<|end_body_2|>\n\n<|body_start_3|>\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000485", "length_bytes": 15176, "license_type": "no_license", "methods": [{"docstring": "Get a block through a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. Returns: StageArgs: The namedtuple defined at the top of this file.", "name": "_decode_block_string", "signature": "def _decode_block_string(block_string)"}, {"docstring": "Encode a block to a string. Args: block (namedtuple): A BlockArgs type argument. Returns: block_string: A String form of BlockArgs.", "name": "_encode_block_string", "signature": "def _encode_block_string(block)"}, {"docstring": "Decode a list of string notations to specify blocks inside the network. Args: string_list (list[str]): A list of strings, each string is a notation of block. Returns: blocks_args: A list of BlockArgs namedtuples of block args.", "name": "decode", "signature": "def decode(string_list)"}, {"docstring": "Encode a list of BlockArgs to a list of strings. Args: blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. Returns: block_strings: A list of strings, each string is a notation of block.", "name": "encode", "signature": "def encode(blocks_args)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_030495", "prompt": "Implement the Python class `BlockDecoder` described below.\n\nClass description:\nBlock Decoder for readability, straight from the official TensorFlow repository.\n\nMethod signatures and docstrings:\n- def _decode_block_string(block_string): Get a block through a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. Returns: StageArgs: The namedtuple defined at the top of this file.\n- def _encode_block_string(block): Encode a block to a string. Args: block (namedtuple): A BlockArgs type argument. Returns: block_string: A String form of BlockArgs.\n- def decode(string_list): Decode a list of string notations to specify blocks inside the network. Args: string_list (list[str]): A list of strings, each string is a notation of block. Returns: blocks_args: A list of BlockArgs namedtuples of block args.\n- def encode(blocks_args): Encode a list of BlockArgs to a list of strings. Args: blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. Returns: block_strings: A list of strings, each string is a notation of block.", "prompted_full_text": "Implement the Python class `BlockDecoder` described below.\n\nClass description:\nBlock Decoder for readability, straight from the official TensorFlow repository.\n\nMethod signatures and docstrings:\n- def _decode_block_string(block_string): Get a block through a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. Returns: StageArgs: The namedtuple defined at the top of this file.\n- def _encode_block_string(block): Encode a block to a string. Args: block (namedtuple): A BlockArgs type argument. Returns: block_string: A String form of BlockArgs.\n- def decode(string_list): Decode a list of string notations to specify blocks inside the network. Args: string_list (list[str]): A list of strings, each string is a notation of block. Returns: blocks_args: A list of BlockArgs namedtuples of block args.\n- def encode(blocks_args): Encode a list of BlockArgs to a list of strings. Args: blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. Returns: block_strings: A list of strings, each string is a notation of block.\n\n<|skeleton|>\nclass BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository.\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Get a block through a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. Returns: StageArgs: The namedtuple defined at the top of this file.\"\"\"\n <|body_0|>\n\n def _encode_block_string(block):\n \"\"\"Encode a block to a string. Args: block (namedtuple): A BlockArgs type argument. Returns: block_string: A String form of BlockArgs.\"\"\"\n <|body_1|>\n\n def decode(string_list):\n \"\"\"Decode a list of string notations to specify blocks inside the network. Args: string_list (list[str]): A list of strings, each string is a notation of block. Returns: blocks_args: A list of BlockArgs namedtuples of block args.\"\"\"\n <|body_2|>\n\n def encode(blocks_args):\n \"\"\"Encode a list of BlockArgs to a list of strings. Args: blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. Returns: block_strings: A list of strings, each string is a notation of block.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return StageArgs(num_repeat=int(options['r']), kernel_size=int(options['k']), stride=[int(options['s'][0])], expand_ratio=int(options['e']), input_filters=int(options['i']), output_filters=int(options['o']), se_ratio=float(options['se']) if 'se' in options else None, id_skip='noskip' not in block_string)\n<|end_body_0|>\n\n<|body_start_1|>\n args = ['r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters]\n if 0 < block.se_ratio <= 1:\n args.append('se%s' % block.se_ratio)\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n<|end_body_1|>\n\n<|body_start_2|>\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n<|end_body_2|>\n\n<|body_start_3|>\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n<|end_body_3|>\n", "revision_id": "880ce20b9d2defe541125e798f43fc8713d26659", "skeleton": "<|skeleton|>\nclass BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository.\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Get a block through a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. Returns: StageArgs: The namedtuple defined at the top of this file.\"\"\"\n <|body_0|>\n\n def _encode_block_string(block):\n \"\"\"Encode a block to a string. Args: block (namedtuple): A BlockArgs type argument. Returns: block_string: A String form of BlockArgs.\"\"\"\n <|body_1|>\n\n def decode(string_list):\n \"\"\"Decode a list of string notations to specify blocks inside the network. Args: string_list (list[str]): A list of strings, each string is a notation of block. Returns: blocks_args: A list of BlockArgs namedtuples of block args.\"\"\"\n <|body_2|>\n\n def encode(blocks_args):\n \"\"\"Encode a list of BlockArgs to a list of strings. Args: blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. Returns: block_strings: A list of strings, each string is a notation of block.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class BlockDecoder:\n \"\"\"Block Decoder for readability, straight from the official TensorFlow repository.\"\"\"\n\n def _decode_block_string(block_string):\n \"\"\"Get a block through a string notation of arguments. Args: block_string (str): A string notation of arguments. Examples: 'r1_k3_s11_e1_i32_o16_se0.25_noskip'. Returns: StageArgs: The namedtuple defined at the top of this file.\"\"\"\n assert isinstance(block_string, str)\n ops = block_string.split('_')\n options = {}\n for op in ops:\n splits = re.split('(\\\\d.*)', op)\n if len(splits) >= 2:\n key, value = splits[:2]\n options[key] = value\n assert 's' in options and len(options['s']) == 1 or (len(options['s']) == 2 and options['s'][0] == options['s'][1])\n return StageArgs(num_repeat=int(options['r']), kernel_size=int(options['k']), stride=[int(options['s'][0])], expand_ratio=int(options['e']), input_filters=int(options['i']), output_filters=int(options['o']), se_ratio=float(options['se']) if 'se' in options else None, id_skip='noskip' not in block_string)\n\n def _encode_block_string(block):\n \"\"\"Encode a block to a string. Args: block (namedtuple): A BlockArgs type argument. Returns: block_string: A String form of BlockArgs.\"\"\"\n args = ['r%d' % block.num_repeat, 'k%d' % block.kernel_size, 's%d%d' % (block.strides[0], block.strides[1]), 'e%s' % block.expand_ratio, 'i%d' % block.input_filters, 'o%d' % block.output_filters]\n if 0 < block.se_ratio <= 1:\n args.append('se%s' % block.se_ratio)\n if block.id_skip is False:\n args.append('noskip')\n return '_'.join(args)\n\n def decode(string_list):\n \"\"\"Decode a list of string notations to specify blocks inside the network. Args: string_list (list[str]): A list of strings, each string is a notation of block. Returns: blocks_args: A list of BlockArgs namedtuples of block args.\"\"\"\n assert isinstance(string_list, list)\n blocks_args = []\n for block_string in string_list:\n blocks_args.append(BlockDecoder._decode_block_string(block_string))\n return blocks_args\n\n def encode(blocks_args):\n \"\"\"Encode a list of BlockArgs to a list of strings. Args: blocks_args (list[namedtuples]): A list of BlockArgs namedtuples of block args. Returns: block_strings: A list of strings, each string is a notation of block.\"\"\"\n block_strings = []\n for block in blocks_args:\n block_strings.append(BlockDecoder._encode_block_string(block))\n return block_strings\n", "source": "the_stack_v2_python_sparse", "source_path": "models/effnetv2.py", "source_repo": "zhangxiwensjtu/classification", "split": "test", "star_events_count": 0} {"blob_id": "805247b1df525a81df931e92db09ce35b27c7f21", "bodies": ["super(CustomCrossEntropyLoss, self).__init__()\nself.use_sigmoid = desc['use_sigmoid'] if 'use_sigmoid' in desc else False\nself.use_mask = desc['use_mask'] if 'use_mask' in desc else False\nself.reduction = desc['reduction'] if 'reduction' in desc else 'mean'\nself.loss_weight = desc['loss_weight'] if 'loss_weight' in desc else 1.0\nif self.use_sigmoid:\n self.loss_function = binary_cross_entropy\nelif self.use_mask:\n self.loss_function = mask_cross_entropy\nelse:\n self.loss_function = cross_entropy", "assert reduction_override in (None, 'none', 'mean', 'sum')\nreduction = reduction_override if reduction_override else self.reduction\nloss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction, avg_factor=avg_factor, **kwargs)\nreturn loss_cls"], "bodies_text": "<|body_start_0|>\n super(CustomCrossEntropyLoss, self).__init__()\n self.use_sigmoid = desc['use_sigmoid'] if 'use_sigmoid' in desc else False\n self.use_mask = desc['use_mask'] if 'use_mask' in desc else False\n self.reduction = desc['reduction'] if 'reduction' in desc else 'mean'\n self.loss_weight = desc['loss_weight'] if 'loss_weight' in desc else 1.0\n if self.use_sigmoid:\n self.loss_function = binary_cross_entropy\n elif self.use_mask:\n self.loss_function = mask_cross_entropy\n else:\n self.loss_function = cross_entropy\n<|end_body_0|>\n\n<|body_start_1|>\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = reduction_override if reduction_override else self.reduction\n loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction, avg_factor=avg_factor, **kwargs)\n return loss_cls\n<|end_body_1|>\n", "class_docstring": "Cross Entropy Loss.", "class_name": "CustomCrossEntropyLoss", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass CustomCrossEntropyLoss:\n \"\"\"Cross Entropy Loss.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init Cross Entropy loss. :param desc: config dict\"\"\"\n <|body_0|>\n\n def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs):\n \"\"\"Forward compute. :param cls_score: class score :param label: gt labels :param weight: weights :param avg_factor: avg factor :param reduction_override: reduce function :return: loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CustomCrossEntropyLoss, self).__init__()\n self.use_sigmoid = desc['use_sigmoid'] if 'use_sigmoid' in desc else False\n self.use_mask = desc['use_mask'] if 'use_mask' in desc else False\n self.reduction = desc['reduction'] if 'reduction' in desc else 'mean'\n self.loss_weight = desc['loss_weight'] if 'loss_weight' in desc else 1.0\n if self.use_sigmoid:\n self.loss_function = binary_cross_entropy\n elif self.use_mask:\n self.loss_function = mask_cross_entropy\n else:\n self.loss_function = cross_entropy\n<|end_body_0|>\n\n<|body_start_1|>\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = reduction_override if reduction_override else self.reduction\n loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction, avg_factor=avg_factor, **kwargs)\n return loss_cls\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000486", "length_bytes": 4653, "license_type": "permissive", "methods": [{"docstring": "Init Cross Entropy loss. :param desc: config dict", "name": "__init__", "signature": "def __init__(self, desc)"}, {"docstring": "Forward compute. :param cls_score: class score :param label: gt labels :param weight: weights :param avg_factor: avg factor :param reduction_override: reduce function :return: loss", "name": "forward", "signature": "def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_028657", "prompt": "Implement the Python class `CustomCrossEntropyLoss` described below.\n\nClass description:\nCross Entropy Loss.\n\nMethod signatures and docstrings:\n- def __init__(self, desc): Init Cross Entropy loss. :param desc: config dict\n- def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs): Forward compute. :param cls_score: class score :param label: gt labels :param weight: weights :param avg_factor: avg factor :param reduction_override: reduce function :return: loss", "prompted_full_text": "Implement the Python class `CustomCrossEntropyLoss` described below.\n\nClass description:\nCross Entropy Loss.\n\nMethod signatures and docstrings:\n- def __init__(self, desc): Init Cross Entropy loss. :param desc: config dict\n- def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs): Forward compute. :param cls_score: class score :param label: gt labels :param weight: weights :param avg_factor: avg factor :param reduction_override: reduce function :return: loss\n\n<|skeleton|>\nclass CustomCrossEntropyLoss:\n \"\"\"Cross Entropy Loss.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init Cross Entropy loss. :param desc: config dict\"\"\"\n <|body_0|>\n\n def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs):\n \"\"\"Forward compute. :param cls_score: class score :param label: gt labels :param weight: weights :param avg_factor: avg factor :param reduction_override: reduce function :return: loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(CustomCrossEntropyLoss, self).__init__()\n self.use_sigmoid = desc['use_sigmoid'] if 'use_sigmoid' in desc else False\n self.use_mask = desc['use_mask'] if 'use_mask' in desc else False\n self.reduction = desc['reduction'] if 'reduction' in desc else 'mean'\n self.loss_weight = desc['loss_weight'] if 'loss_weight' in desc else 1.0\n if self.use_sigmoid:\n self.loss_function = binary_cross_entropy\n elif self.use_mask:\n self.loss_function = mask_cross_entropy\n else:\n self.loss_function = cross_entropy\n<|end_body_0|>\n\n<|body_start_1|>\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = reduction_override if reduction_override else self.reduction\n loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction, avg_factor=avg_factor, **kwargs)\n return loss_cls\n<|end_body_1|>\n", "revision_id": "e4ef3a1c92d19d1d08c3ef0e2156b6fecefdbe04", "skeleton": "<|skeleton|>\nclass CustomCrossEntropyLoss:\n \"\"\"Cross Entropy Loss.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init Cross Entropy loss. :param desc: config dict\"\"\"\n <|body_0|>\n\n def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs):\n \"\"\"Forward compute. :param cls_score: class score :param label: gt labels :param weight: weights :param avg_factor: avg factor :param reduction_override: reduce function :return: loss\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class CustomCrossEntropyLoss:\n \"\"\"Cross Entropy Loss.\"\"\"\n\n def __init__(self, desc):\n \"\"\"Init Cross Entropy loss. :param desc: config dict\"\"\"\n super(CustomCrossEntropyLoss, self).__init__()\n self.use_sigmoid = desc['use_sigmoid'] if 'use_sigmoid' in desc else False\n self.use_mask = desc['use_mask'] if 'use_mask' in desc else False\n self.reduction = desc['reduction'] if 'reduction' in desc else 'mean'\n self.loss_weight = desc['loss_weight'] if 'loss_weight' in desc else 1.0\n if self.use_sigmoid:\n self.loss_function = binary_cross_entropy\n elif self.use_mask:\n self.loss_function = mask_cross_entropy\n else:\n self.loss_function = cross_entropy\n\n def forward(self, cls_score, label, weight=None, avg_factor=None, reduction_override=None, **kwargs):\n \"\"\"Forward compute. :param cls_score: class score :param label: gt labels :param weight: weights :param avg_factor: avg factor :param reduction_override: reduce function :return: loss\"\"\"\n assert reduction_override in (None, 'none', 'mean', 'sum')\n reduction = reduction_override if reduction_override else self.reduction\n loss_cls = self.loss_weight * self.loss_function(cls_score, label, weight, reduction=reduction, avg_factor=avg_factor, **kwargs)\n return loss_cls\n", "source": "the_stack_v2_python_sparse", "source_path": "zeus/networks/pytorch/losses/custom_cross_entropy_loss.py", "source_repo": "huawei-noah/xingtian", "split": "test", "star_events_count": 308} {"blob_id": "14575d89d064822bbbfca822624e26101a8435a6", "bodies": ["if operations not in ['f', 'b', 'fb', 'bf']:\n raise ValueError(\"'operations' parameter should be one of the following options: f, b, fb, bf.\")\nself.feature = self.parse_feature(feature)\nself.operations = operations\nself.value = value\nself.axis = axis", "if not isinstance(data, np.ndarray) or data.ndim != 2:\n raise ValueError('Wrong data input')\nif operation not in ['f', 'b']:\n raise ValueError(\"'operation' parameter should either be 'f' (forward) or 'b' (backward)!\")\nn_rows, n_frames = data.shape\nvalue_mask = np.isnan(data) if np.isnan(value) else data == value\ninit_index = 0 if operation == 'f' else n_frames - 1\nidx = np.where(value_mask, init_index, np.arange(n_frames))\nif operation == 'f':\n idx = np.maximum.accumulate(idx, axis=1)\nelse:\n idx = idx[:, ::-1]\n idx = np.minimum.accumulate(idx, axis=1)\n idx = idx[:, ::-1]\nreturn data[np.arange(n_rows)[:, np.newaxis], idx]", "data = eopatch[self.feature]\nvalue_mask = np.isnan(data) if np.isnan(self.value) else data == self.value\nif not value_mask.any():\n return eopatch\ndata = np.swapaxes(data, self.axis, -1)\noriginal_shape = data.shape\ndata = data.reshape(np.prod(original_shape[:-1]), original_shape[-1])\nfor operation in self.operations:\n operation = cast(Literal['f', 'b'], operation)\n data = self.fill(data, value=self.value, operation=operation)\ndata = data.reshape(*original_shape)\ndata = np.swapaxes(data, self.axis, -1)\neopatch[self.feature] = data\nreturn eopatch"], "bodies_text": "<|body_start_0|>\n if operations not in ['f', 'b', 'fb', 'bf']:\n raise ValueError(\"'operations' parameter should be one of the following options: f, b, fb, bf.\")\n self.feature = self.parse_feature(feature)\n self.operations = operations\n self.value = value\n self.axis = axis\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(data, np.ndarray) or data.ndim != 2:\n raise ValueError('Wrong data input')\n if operation not in ['f', 'b']:\n raise ValueError(\"'operation' parameter should either be 'f' (forward) or 'b' (backward)!\")\n n_rows, n_frames = data.shape\n value_mask = np.isnan(data) if np.isnan(value) else data == value\n init_index = 0 if operation == 'f' else n_frames - 1\n idx = np.where(value_mask, init_index, np.arange(n_frames))\n if operation == 'f':\n idx = np.maximum.accumulate(idx, axis=1)\n else:\n idx = idx[:, ::-1]\n idx = np.minimum.accumulate(idx, axis=1)\n idx = idx[:, ::-1]\n return data[np.arange(n_rows)[:, np.newaxis], idx]\n<|end_body_1|>\n\n<|body_start_2|>\n data = eopatch[self.feature]\n value_mask = np.isnan(data) if np.isnan(self.value) else data == self.value\n if not value_mask.any():\n return eopatch\n data = np.swapaxes(data, self.axis, -1)\n original_shape = data.shape\n data = data.reshape(np.prod(original_shape[:-1]), original_shape[-1])\n for operation in self.operations:\n operation = cast(Literal['f', 'b'], operation)\n data = self.fill(data, value=self.value, operation=operation)\n data = data.reshape(*original_shape)\n data = np.swapaxes(data, self.axis, -1)\n eopatch[self.feature] = data\n return eopatch\n<|end_body_2|>\n", "class_docstring": "Overwrites occurrences of a desired value with their neighbor values in either forward, backward direction or both, along an axis. Possible fillout operations are 'f' (forward), 'b' (backward) or both, 'fb' or 'bf': 'f': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> nan, nan, nan, 8, 5, 5, 1, 0, 0, 0 'b': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, nan, nan 'fb': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 5, 1, 0, 0, 0 'bf': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, 0, 0", "class_name": "ValueFilloutTask", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ValueFilloutTask:\n \"\"\"Overwrites occurrences of a desired value with their neighbor values in either forward, backward direction or both, along an axis. Possible fillout operations are 'f' (forward), 'b' (backward) or both, 'fb' or 'bf': 'f': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> nan, nan, nan, 8, 5, 5, 1, 0, 0, 0 'b': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, nan, nan 'fb': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 5, 1, 0, 0, 0 'bf': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, 0, 0\"\"\"\n\n def __init__(self, feature: SingleFeatureSpec, operations: Literal['f', 'b', 'fb', 'bf']='fb', value: float=np.nan, axis: int=0):\n \"\"\":param feature: A feature that must be value-filled. :param operations: Fill directions, which should be one of ['f', 'b', 'fb', 'bf']. :param value: Which value to fill by its neighbors. :param axis: An axis along which to fill values.\"\"\"\n <|body_0|>\n\n def fill(data: np.ndarray, value: float=np.nan, operation: Literal['f', 'b']='f') -> np.ndarray:\n \"\"\"Fills occurrences of a desired value in a 2d array with their neighbors in either forward or backward direction. :param data: A 2d numpy array. :param value: Which value to fill by its neighbors. :param operation: Fill directions, which should be either 'f' or 'b'. :return: Value-filled numpy array.\"\"\"\n <|body_1|>\n\n def execute(self, eopatch: EOPatch) -> EOPatch:\n \"\"\":param eopatch: Source EOPatch from which to read the feature data. :return: An eopatch with the value-filled feature.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if operations not in ['f', 'b', 'fb', 'bf']:\n raise ValueError(\"'operations' parameter should be one of the following options: f, b, fb, bf.\")\n self.feature = self.parse_feature(feature)\n self.operations = operations\n self.value = value\n self.axis = axis\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(data, np.ndarray) or data.ndim != 2:\n raise ValueError('Wrong data input')\n if operation not in ['f', 'b']:\n raise ValueError(\"'operation' parameter should either be 'f' (forward) or 'b' (backward)!\")\n n_rows, n_frames = data.shape\n value_mask = np.isnan(data) if np.isnan(value) else data == value\n init_index = 0 if operation == 'f' else n_frames - 1\n idx = np.where(value_mask, init_index, np.arange(n_frames))\n if operation == 'f':\n idx = np.maximum.accumulate(idx, axis=1)\n else:\n idx = idx[:, ::-1]\n idx = np.minimum.accumulate(idx, axis=1)\n idx = idx[:, ::-1]\n return data[np.arange(n_rows)[:, np.newaxis], idx]\n<|end_body_1|>\n\n<|body_start_2|>\n data = eopatch[self.feature]\n value_mask = np.isnan(data) if np.isnan(self.value) else data == self.value\n if not value_mask.any():\n return eopatch\n data = np.swapaxes(data, self.axis, -1)\n original_shape = data.shape\n data = data.reshape(np.prod(original_shape[:-1]), original_shape[-1])\n for operation in self.operations:\n operation = cast(Literal['f', 'b'], operation)\n data = self.fill(data, value=self.value, operation=operation)\n data = data.reshape(*original_shape)\n data = np.swapaxes(data, self.axis, -1)\n eopatch[self.feature] = data\n return eopatch\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000487", "length_bytes": 12661, "license_type": "permissive", "methods": [{"docstring": ":param feature: A feature that must be value-filled. :param operations: Fill directions, which should be one of ['f', 'b', 'fb', 'bf']. :param value: Which value to fill by its neighbors. :param axis: An axis along which to fill values.", "name": "__init__", "signature": "def __init__(self, feature: SingleFeatureSpec, operations: Literal['f', 'b', 'fb', 'bf']='fb', value: float=np.nan, axis: int=0)"}, {"docstring": "Fills occurrences of a desired value in a 2d array with their neighbors in either forward or backward direction. :param data: A 2d numpy array. :param value: Which value to fill by its neighbors. :param operation: Fill directions, which should be either 'f' or 'b'. :return: Value-filled numpy array.", "name": "fill", "signature": "def fill(data: np.ndarray, value: float=np.nan, operation: Literal['f', 'b']='f') -> np.ndarray"}, {"docstring": ":param eopatch: Source EOPatch from which to read the feature data. :return: An eopatch with the value-filled feature.", "name": "execute", "signature": "def execute(self, eopatch: EOPatch) -> EOPatch"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_012280", "prompt": "Implement the Python class `ValueFilloutTask` described below.\n\nClass description:\nOverwrites occurrences of a desired value with their neighbor values in either forward, backward direction or both, along an axis. Possible fillout operations are 'f' (forward), 'b' (backward) or both, 'fb' or 'bf': 'f': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> nan, nan, nan, 8, 5, 5, 1, 0, 0, 0 'b': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, nan, nan 'fb': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 5, 1, 0, 0, 0 'bf': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, 0, 0\n\nMethod signatures and docstrings:\n- def __init__(self, feature: SingleFeatureSpec, operations: Literal['f', 'b', 'fb', 'bf']='fb', value: float=np.nan, axis: int=0): :param feature: A feature that must be value-filled. :param operations: Fill directions, which should be one of ['f', 'b', 'fb', 'bf']. :param value: Which value to fill by its neighbors. :param axis: An axis along which to fill values.\n- def fill(data: np.ndarray, value: float=np.nan, operation: Literal['f', 'b']='f') -> np.ndarray: Fills occurrences of a desired value in a 2d array with their neighbors in either forward or backward direction. :param data: A 2d numpy array. :param value: Which value to fill by its neighbors. :param operation: Fill directions, which should be either 'f' or 'b'. :return: Value-filled numpy array.\n- def execute(self, eopatch: EOPatch) -> EOPatch: :param eopatch: Source EOPatch from which to read the feature data. :return: An eopatch with the value-filled feature.", "prompted_full_text": "Implement the Python class `ValueFilloutTask` described below.\n\nClass description:\nOverwrites occurrences of a desired value with their neighbor values in either forward, backward direction or both, along an axis. Possible fillout operations are 'f' (forward), 'b' (backward) or both, 'fb' or 'bf': 'f': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> nan, nan, nan, 8, 5, 5, 1, 0, 0, 0 'b': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, nan, nan 'fb': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 5, 1, 0, 0, 0 'bf': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, 0, 0\n\nMethod signatures and docstrings:\n- def __init__(self, feature: SingleFeatureSpec, operations: Literal['f', 'b', 'fb', 'bf']='fb', value: float=np.nan, axis: int=0): :param feature: A feature that must be value-filled. :param operations: Fill directions, which should be one of ['f', 'b', 'fb', 'bf']. :param value: Which value to fill by its neighbors. :param axis: An axis along which to fill values.\n- def fill(data: np.ndarray, value: float=np.nan, operation: Literal['f', 'b']='f') -> np.ndarray: Fills occurrences of a desired value in a 2d array with their neighbors in either forward or backward direction. :param data: A 2d numpy array. :param value: Which value to fill by its neighbors. :param operation: Fill directions, which should be either 'f' or 'b'. :return: Value-filled numpy array.\n- def execute(self, eopatch: EOPatch) -> EOPatch: :param eopatch: Source EOPatch from which to read the feature data. :return: An eopatch with the value-filled feature.\n\n<|skeleton|>\nclass ValueFilloutTask:\n \"\"\"Overwrites occurrences of a desired value with their neighbor values in either forward, backward direction or both, along an axis. Possible fillout operations are 'f' (forward), 'b' (backward) or both, 'fb' or 'bf': 'f': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> nan, nan, nan, 8, 5, 5, 1, 0, 0, 0 'b': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, nan, nan 'fb': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 5, 1, 0, 0, 0 'bf': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, 0, 0\"\"\"\n\n def __init__(self, feature: SingleFeatureSpec, operations: Literal['f', 'b', 'fb', 'bf']='fb', value: float=np.nan, axis: int=0):\n \"\"\":param feature: A feature that must be value-filled. :param operations: Fill directions, which should be one of ['f', 'b', 'fb', 'bf']. :param value: Which value to fill by its neighbors. :param axis: An axis along which to fill values.\"\"\"\n <|body_0|>\n\n def fill(data: np.ndarray, value: float=np.nan, operation: Literal['f', 'b']='f') -> np.ndarray:\n \"\"\"Fills occurrences of a desired value in a 2d array with their neighbors in either forward or backward direction. :param data: A 2d numpy array. :param value: Which value to fill by its neighbors. :param operation: Fill directions, which should be either 'f' or 'b'. :return: Value-filled numpy array.\"\"\"\n <|body_1|>\n\n def execute(self, eopatch: EOPatch) -> EOPatch:\n \"\"\":param eopatch: Source EOPatch from which to read the feature data. :return: An eopatch with the value-filled feature.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n if operations not in ['f', 'b', 'fb', 'bf']:\n raise ValueError(\"'operations' parameter should be one of the following options: f, b, fb, bf.\")\n self.feature = self.parse_feature(feature)\n self.operations = operations\n self.value = value\n self.axis = axis\n<|end_body_0|>\n\n<|body_start_1|>\n if not isinstance(data, np.ndarray) or data.ndim != 2:\n raise ValueError('Wrong data input')\n if operation not in ['f', 'b']:\n raise ValueError(\"'operation' parameter should either be 'f' (forward) or 'b' (backward)!\")\n n_rows, n_frames = data.shape\n value_mask = np.isnan(data) if np.isnan(value) else data == value\n init_index = 0 if operation == 'f' else n_frames - 1\n idx = np.where(value_mask, init_index, np.arange(n_frames))\n if operation == 'f':\n idx = np.maximum.accumulate(idx, axis=1)\n else:\n idx = idx[:, ::-1]\n idx = np.minimum.accumulate(idx, axis=1)\n idx = idx[:, ::-1]\n return data[np.arange(n_rows)[:, np.newaxis], idx]\n<|end_body_1|>\n\n<|body_start_2|>\n data = eopatch[self.feature]\n value_mask = np.isnan(data) if np.isnan(self.value) else data == self.value\n if not value_mask.any():\n return eopatch\n data = np.swapaxes(data, self.axis, -1)\n original_shape = data.shape\n data = data.reshape(np.prod(original_shape[:-1]), original_shape[-1])\n for operation in self.operations:\n operation = cast(Literal['f', 'b'], operation)\n data = self.fill(data, value=self.value, operation=operation)\n data = data.reshape(*original_shape)\n data = np.swapaxes(data, self.axis, -1)\n eopatch[self.feature] = data\n return eopatch\n<|end_body_2|>\n", "revision_id": "a65899e4632b50c9c41a67e1f7698c09b929d840", "skeleton": "<|skeleton|>\nclass ValueFilloutTask:\n \"\"\"Overwrites occurrences of a desired value with their neighbor values in either forward, backward direction or both, along an axis. Possible fillout operations are 'f' (forward), 'b' (backward) or both, 'fb' or 'bf': 'f': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> nan, nan, nan, 8, 5, 5, 1, 0, 0, 0 'b': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, nan, nan 'fb': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 5, 1, 0, 0, 0 'bf': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, 0, 0\"\"\"\n\n def __init__(self, feature: SingleFeatureSpec, operations: Literal['f', 'b', 'fb', 'bf']='fb', value: float=np.nan, axis: int=0):\n \"\"\":param feature: A feature that must be value-filled. :param operations: Fill directions, which should be one of ['f', 'b', 'fb', 'bf']. :param value: Which value to fill by its neighbors. :param axis: An axis along which to fill values.\"\"\"\n <|body_0|>\n\n def fill(data: np.ndarray, value: float=np.nan, operation: Literal['f', 'b']='f') -> np.ndarray:\n \"\"\"Fills occurrences of a desired value in a 2d array with their neighbors in either forward or backward direction. :param data: A 2d numpy array. :param value: Which value to fill by its neighbors. :param operation: Fill directions, which should be either 'f' or 'b'. :return: Value-filled numpy array.\"\"\"\n <|body_1|>\n\n def execute(self, eopatch: EOPatch) -> EOPatch:\n \"\"\":param eopatch: Source EOPatch from which to read the feature data. :return: An eopatch with the value-filled feature.\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ValueFilloutTask:\n \"\"\"Overwrites occurrences of a desired value with their neighbor values in either forward, backward direction or both, along an axis. Possible fillout operations are 'f' (forward), 'b' (backward) or both, 'fb' or 'bf': 'f': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> nan, nan, nan, 8, 5, 5, 1, 0, 0, 0 'b': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, nan, nan 'fb': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 5, 1, 0, 0, 0 'bf': nan, nan, nan, 8, 5, nan, 1, 0, nan, nan -> 8, 8, 8, 8, 5, 1, 1, 0, 0, 0\"\"\"\n\n def __init__(self, feature: SingleFeatureSpec, operations: Literal['f', 'b', 'fb', 'bf']='fb', value: float=np.nan, axis: int=0):\n \"\"\":param feature: A feature that must be value-filled. :param operations: Fill directions, which should be one of ['f', 'b', 'fb', 'bf']. :param value: Which value to fill by its neighbors. :param axis: An axis along which to fill values.\"\"\"\n if operations not in ['f', 'b', 'fb', 'bf']:\n raise ValueError(\"'operations' parameter should be one of the following options: f, b, fb, bf.\")\n self.feature = self.parse_feature(feature)\n self.operations = operations\n self.value = value\n self.axis = axis\n\n def fill(data: np.ndarray, value: float=np.nan, operation: Literal['f', 'b']='f') -> np.ndarray:\n \"\"\"Fills occurrences of a desired value in a 2d array with their neighbors in either forward or backward direction. :param data: A 2d numpy array. :param value: Which value to fill by its neighbors. :param operation: Fill directions, which should be either 'f' or 'b'. :return: Value-filled numpy array.\"\"\"\n if not isinstance(data, np.ndarray) or data.ndim != 2:\n raise ValueError('Wrong data input')\n if operation not in ['f', 'b']:\n raise ValueError(\"'operation' parameter should either be 'f' (forward) or 'b' (backward)!\")\n n_rows, n_frames = data.shape\n value_mask = np.isnan(data) if np.isnan(value) else data == value\n init_index = 0 if operation == 'f' else n_frames - 1\n idx = np.where(value_mask, init_index, np.arange(n_frames))\n if operation == 'f':\n idx = np.maximum.accumulate(idx, axis=1)\n else:\n idx = idx[:, ::-1]\n idx = np.minimum.accumulate(idx, axis=1)\n idx = idx[:, ::-1]\n return data[np.arange(n_rows)[:, np.newaxis], idx]\n\n def execute(self, eopatch: EOPatch) -> EOPatch:\n \"\"\":param eopatch: Source EOPatch from which to read the feature data. :return: An eopatch with the value-filled feature.\"\"\"\n data = eopatch[self.feature]\n value_mask = np.isnan(data) if np.isnan(self.value) else data == self.value\n if not value_mask.any():\n return eopatch\n data = np.swapaxes(data, self.axis, -1)\n original_shape = data.shape\n data = data.reshape(np.prod(original_shape[:-1]), original_shape[-1])\n for operation in self.operations:\n operation = cast(Literal['f', 'b'], operation)\n data = self.fill(data, value=self.value, operation=operation)\n data = data.reshape(*original_shape)\n data = np.swapaxes(data, self.axis, -1)\n eopatch[self.feature] = data\n return eopatch\n", "source": "the_stack_v2_python_sparse", "source_path": "features/eolearn/features/feature_manipulation.py", "source_repo": "sentinel-hub/eo-learn", "split": "test", "star_events_count": 1072} {"blob_id": "163469a07c686fb8a6d640adc9eac66c65f69567", "bodies": ["current_page = int(self.request.query_params.get('current_page'))\npage_size = int(self.request.query_params.get('page_size'))\nnow = datetime.datetime.now()\nevents = Event.objects.filter(date__gte=now).order_by('date')\nevents_serializer = EventSerializer(events, many=True)\npaginator = Paginator(events_serializer.data, page_size)\npage = paginator.get_page(current_page)\nreturn_data = dict()\nreturn_data['events'] = page.object_list\nreturn_data['last_page'] = paginator.num_pages\nreturn JsonResponse(return_data)", "event = Event.objects.get(id=request.data['event_id'])\nevent_serializer = EventSerializer(event)\nticket_types = event.ticket_types.all()\nticket_type_serializer = TicketTypeSerializer(ticket_types, many=True)\nserialized_ticket_types = ticket_type_serializer.data\nticket_counters = dict()\nfor ticket in serialized_ticket_types:\n ticket_counters[ticket['type']] = event.tickets_ordered.filter(Q(type=ticket['type'], reservation__status='PENDING') | Q(type=ticket['type'], reservation__status='COMPLETED')).count()\nfor ticket in serialized_ticket_types:\n ticket['tickets_left'] = ticket['amount'] - ticket_counters[ticket['type']]\nreturn_data = dict()\nreturn_data['event'] = event_serializer.data\nreturn_data['ticket_types'] = ticket_type_serializer.data\nreturn JsonResponse(return_data)"], "bodies_text": "<|body_start_0|>\n current_page = int(self.request.query_params.get('current_page'))\n page_size = int(self.request.query_params.get('page_size'))\n now = datetime.datetime.now()\n events = Event.objects.filter(date__gte=now).order_by('date')\n events_serializer = EventSerializer(events, many=True)\n paginator = Paginator(events_serializer.data, page_size)\n page = paginator.get_page(current_page)\n return_data = dict()\n return_data['events'] = page.object_list\n return_data['last_page'] = paginator.num_pages\n return JsonResponse(return_data)\n<|end_body_0|>\n\n<|body_start_1|>\n event = Event.objects.get(id=request.data['event_id'])\n event_serializer = EventSerializer(event)\n ticket_types = event.ticket_types.all()\n ticket_type_serializer = TicketTypeSerializer(ticket_types, many=True)\n serialized_ticket_types = ticket_type_serializer.data\n ticket_counters = dict()\n for ticket in serialized_ticket_types:\n ticket_counters[ticket['type']] = event.tickets_ordered.filter(Q(type=ticket['type'], reservation__status='PENDING') | Q(type=ticket['type'], reservation__status='COMPLETED')).count()\n for ticket in serialized_ticket_types:\n ticket['tickets_left'] = ticket['amount'] - ticket_counters[ticket['type']]\n return_data = dict()\n return_data['event'] = event_serializer.data\n return_data['ticket_types'] = ticket_type_serializer.data\n return JsonResponse(return_data)\n<|end_body_1|>\n", "class_docstring": "", "class_name": "EventViewSet", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass EventViewSet:\n\n def list(self, request):\n \"\"\"Endpoint returns paginated list of all events available in the database. Required params: - current_page: number (starting from 1) - page_size: number :param request: :return: - events: list - list of events - last_page: number\"\"\"\n <|body_0|>\n\n def create(self, request):\n \"\"\"Endpoint returns detailed info about particular event based on given event id. Required params: - event_id: string :param request: :return: Event data, types of tickets, amount of tickets available\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n current_page = int(self.request.query_params.get('current_page'))\n page_size = int(self.request.query_params.get('page_size'))\n now = datetime.datetime.now()\n events = Event.objects.filter(date__gte=now).order_by('date')\n events_serializer = EventSerializer(events, many=True)\n paginator = Paginator(events_serializer.data, page_size)\n page = paginator.get_page(current_page)\n return_data = dict()\n return_data['events'] = page.object_list\n return_data['last_page'] = paginator.num_pages\n return JsonResponse(return_data)\n<|end_body_0|>\n\n<|body_start_1|>\n event = Event.objects.get(id=request.data['event_id'])\n event_serializer = EventSerializer(event)\n ticket_types = event.ticket_types.all()\n ticket_type_serializer = TicketTypeSerializer(ticket_types, many=True)\n serialized_ticket_types = ticket_type_serializer.data\n ticket_counters = dict()\n for ticket in serialized_ticket_types:\n ticket_counters[ticket['type']] = event.tickets_ordered.filter(Q(type=ticket['type'], reservation__status='PENDING') | Q(type=ticket['type'], reservation__status='COMPLETED')).count()\n for ticket in serialized_ticket_types:\n ticket['tickets_left'] = ticket['amount'] - ticket_counters[ticket['type']]\n return_data = dict()\n return_data['event'] = event_serializer.data\n return_data['ticket_types'] = ticket_type_serializer.data\n return JsonResponse(return_data)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000488", "length_bytes": 12892, "license_type": "no_license", "methods": [{"docstring": "Endpoint returns paginated list of all events available in the database. Required params: - current_page: number (starting from 1) - page_size: number :param request: :return: - events: list - list of events - last_page: number", "name": "list", "signature": "def list(self, request)"}, {"docstring": "Endpoint returns detailed info about particular event based on given event id. Required params: - event_id: string :param request: :return: Event data, types of tickets, amount of tickets available", "name": "create", "signature": "def create(self, request)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003905", "prompt": "Implement the Python class `EventViewSet` described below.\n\nClass description:\nImplement the EventViewSet class.\n\nMethod signatures and docstrings:\n- def list(self, request): Endpoint returns paginated list of all events available in the database. Required params: - current_page: number (starting from 1) - page_size: number :param request: :return: - events: list - list of events - last_page: number\n- def create(self, request): Endpoint returns detailed info about particular event based on given event id. Required params: - event_id: string :param request: :return: Event data, types of tickets, amount of tickets available", "prompted_full_text": "Implement the Python class `EventViewSet` described below.\n\nClass description:\nImplement the EventViewSet class.\n\nMethod signatures and docstrings:\n- def list(self, request): Endpoint returns paginated list of all events available in the database. Required params: - current_page: number (starting from 1) - page_size: number :param request: :return: - events: list - list of events - last_page: number\n- def create(self, request): Endpoint returns detailed info about particular event based on given event id. Required params: - event_id: string :param request: :return: Event data, types of tickets, amount of tickets available\n\n<|skeleton|>\nclass EventViewSet:\n\n def list(self, request):\n \"\"\"Endpoint returns paginated list of all events available in the database. Required params: - current_page: number (starting from 1) - page_size: number :param request: :return: - events: list - list of events - last_page: number\"\"\"\n <|body_0|>\n\n def create(self, request):\n \"\"\"Endpoint returns detailed info about particular event based on given event id. Required params: - event_id: string :param request: :return: Event data, types of tickets, amount of tickets available\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n current_page = int(self.request.query_params.get('current_page'))\n page_size = int(self.request.query_params.get('page_size'))\n now = datetime.datetime.now()\n events = Event.objects.filter(date__gte=now).order_by('date')\n events_serializer = EventSerializer(events, many=True)\n paginator = Paginator(events_serializer.data, page_size)\n page = paginator.get_page(current_page)\n return_data = dict()\n return_data['events'] = page.object_list\n return_data['last_page'] = paginator.num_pages\n return JsonResponse(return_data)\n<|end_body_0|>\n\n<|body_start_1|>\n event = Event.objects.get(id=request.data['event_id'])\n event_serializer = EventSerializer(event)\n ticket_types = event.ticket_types.all()\n ticket_type_serializer = TicketTypeSerializer(ticket_types, many=True)\n serialized_ticket_types = ticket_type_serializer.data\n ticket_counters = dict()\n for ticket in serialized_ticket_types:\n ticket_counters[ticket['type']] = event.tickets_ordered.filter(Q(type=ticket['type'], reservation__status='PENDING') | Q(type=ticket['type'], reservation__status='COMPLETED')).count()\n for ticket in serialized_ticket_types:\n ticket['tickets_left'] = ticket['amount'] - ticket_counters[ticket['type']]\n return_data = dict()\n return_data['event'] = event_serializer.data\n return_data['ticket_types'] = ticket_type_serializer.data\n return JsonResponse(return_data)\n<|end_body_1|>\n", "revision_id": "519535a65cfc83b4c1b20b3ea8ceb359848d58ad", "skeleton": "<|skeleton|>\nclass EventViewSet:\n\n def list(self, request):\n \"\"\"Endpoint returns paginated list of all events available in the database. Required params: - current_page: number (starting from 1) - page_size: number :param request: :return: - events: list - list of events - last_page: number\"\"\"\n <|body_0|>\n\n def create(self, request):\n \"\"\"Endpoint returns detailed info about particular event based on given event id. Required params: - event_id: string :param request: :return: Event data, types of tickets, amount of tickets available\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class EventViewSet:\n def list(self, request):\n \"\"\"Endpoint returns paginated list of all events available in the database. Required params: - current_page: number (starting from 1) - page_size: number :param request: :return: - events: list - list of events - last_page: number\"\"\"\n current_page = int(self.request.query_params.get('current_page'))\n page_size = int(self.request.query_params.get('page_size'))\n now = datetime.datetime.now()\n events = Event.objects.filter(date__gte=now).order_by('date')\n events_serializer = EventSerializer(events, many=True)\n paginator = Paginator(events_serializer.data, page_size)\n page = paginator.get_page(current_page)\n return_data = dict()\n return_data['events'] = page.object_list\n return_data['last_page'] = paginator.num_pages\n return JsonResponse(return_data)\n\n def create(self, request):\n \"\"\"Endpoint returns detailed info about particular event based on given event id. Required params: - event_id: string :param request: :return: Event data, types of tickets, amount of tickets available\"\"\"\n event = Event.objects.get(id=request.data['event_id'])\n event_serializer = EventSerializer(event)\n ticket_types = event.ticket_types.all()\n ticket_type_serializer = TicketTypeSerializer(ticket_types, many=True)\n serialized_ticket_types = ticket_type_serializer.data\n ticket_counters = dict()\n for ticket in serialized_ticket_types:\n ticket_counters[ticket['type']] = event.tickets_ordered.filter(Q(type=ticket['type'], reservation__status='PENDING') | Q(type=ticket['type'], reservation__status='COMPLETED')).count()\n for ticket in serialized_ticket_types:\n ticket['tickets_left'] = ticket['amount'] - ticket_counters[ticket['type']]\n return_data = dict()\n return_data['event'] = event_serializer.data\n return_data['ticket_types'] = ticket_type_serializer.data\n return JsonResponse(return_data)\n", "source": "the_stack_v2_python_sparse", "source_path": "ticketonline/apps/events/views.py", "source_repo": "Mat2314/ticketonline", "split": "test", "star_events_count": 0} {"blob_id": "884ba1b274da0f5736845de349da2a4c344ef87c", "bodies": ["super(SGDBenchmark, self).__init__(config_path, config)\nif not self.config:\n self.config = objdict(SGD_DEFAULTS.copy())\nfor key in SGD_DEFAULTS:\n if key not in self.config:\n self.config[key] = SGD_DEFAULTS[key]", "if 'instance_set' not in self.config.keys():\n self.read_instance_set()\nif 'test_set' not in self.config.keys() and 'test_set_path' in self.config.keys():\n self.read_instance_set(test=True)\nenv = SGDEnv(self.config)\nfor func in self.wrap_funcs:\n env = func(env)\nreturn env", "if test:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.test_set_path\n keyword = 'test_set'\nelse:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.instance_set_path\n keyword = 'instance_set'\nself.config[keyword] = {}\nwith open(path, 'r') as fh:\n reader = csv.DictReader(fh, delimiter=';')\n for row in reader:\n if '_' in row['dataset']:\n dataset_info = row['dataset'].split('_')\n dataset_name = dataset_info[0]\n dataset_size = int(dataset_info[1])\n else:\n dataset_name = row['dataset']\n dataset_size = None\n instance = [dataset_name, int(row['seed']), row['architecture'], int(row['steps']), dataset_size]\n self.config[keyword][int(row['ID'])] = instance", "self.config = objdict(SGD_DEFAULTS.copy())\nif instance_set_path is not None:\n self.config['instance_set_path'] = instance_set_path\nself.config.seed = seed\nself.read_instance_set()\nreturn SGDEnv(self.config)"], "bodies_text": "<|body_start_0|>\n super(SGDBenchmark, self).__init__(config_path, config)\n if not self.config:\n self.config = objdict(SGD_DEFAULTS.copy())\n for key in SGD_DEFAULTS:\n if key not in self.config:\n self.config[key] = SGD_DEFAULTS[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if 'instance_set' not in self.config.keys():\n self.read_instance_set()\n if 'test_set' not in self.config.keys() and 'test_set_path' in self.config.keys():\n self.read_instance_set(test=True)\n env = SGDEnv(self.config)\n for func in self.wrap_funcs:\n env = func(env)\n return env\n<|end_body_1|>\n\n<|body_start_2|>\n if test:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.test_set_path\n keyword = 'test_set'\n else:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.instance_set_path\n keyword = 'instance_set'\n self.config[keyword] = {}\n with open(path, 'r') as fh:\n reader = csv.DictReader(fh, delimiter=';')\n for row in reader:\n if '_' in row['dataset']:\n dataset_info = row['dataset'].split('_')\n dataset_name = dataset_info[0]\n dataset_size = int(dataset_info[1])\n else:\n dataset_name = row['dataset']\n dataset_size = None\n instance = [dataset_name, int(row['seed']), row['architecture'], int(row['steps']), dataset_size]\n self.config[keyword][int(row['ID'])] = instance\n<|end_body_2|>\n\n<|body_start_3|>\n self.config = objdict(SGD_DEFAULTS.copy())\n if instance_set_path is not None:\n self.config['instance_set_path'] = instance_set_path\n self.config.seed = seed\n self.read_instance_set()\n return SGDEnv(self.config)\n<|end_body_3|>\n", "class_docstring": "Benchmark with default configuration & relevant functions for SGD", "class_name": "SGDBenchmark", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass SGDBenchmark:\n \"\"\"Benchmark with default configuration & relevant functions for SGD\"\"\"\n\n def __init__(self, config_path=None, config=None):\n \"\"\"Initialize SGD Benchmark Parameters ------- config_path : str Path to config file (optional)\"\"\"\n <|body_0|>\n\n def get_environment(self):\n \"\"\"Return SGDEnv env with current configuration Returns ------- SGDEnv SGD environment\"\"\"\n <|body_1|>\n\n def read_instance_set(self, test=False):\n \"\"\"Read path of instances from config into list\"\"\"\n <|body_2|>\n\n def get_benchmark(self, instance_set_path=None, seed=0):\n \"\"\"Get benchmark from the LTO paper Parameters ------- seed : int Environment seed Returns ------- env : SGDEnv SGD environment\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SGDBenchmark, self).__init__(config_path, config)\n if not self.config:\n self.config = objdict(SGD_DEFAULTS.copy())\n for key in SGD_DEFAULTS:\n if key not in self.config:\n self.config[key] = SGD_DEFAULTS[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if 'instance_set' not in self.config.keys():\n self.read_instance_set()\n if 'test_set' not in self.config.keys() and 'test_set_path' in self.config.keys():\n self.read_instance_set(test=True)\n env = SGDEnv(self.config)\n for func in self.wrap_funcs:\n env = func(env)\n return env\n<|end_body_1|>\n\n<|body_start_2|>\n if test:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.test_set_path\n keyword = 'test_set'\n else:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.instance_set_path\n keyword = 'instance_set'\n self.config[keyword] = {}\n with open(path, 'r') as fh:\n reader = csv.DictReader(fh, delimiter=';')\n for row in reader:\n if '_' in row['dataset']:\n dataset_info = row['dataset'].split('_')\n dataset_name = dataset_info[0]\n dataset_size = int(dataset_info[1])\n else:\n dataset_name = row['dataset']\n dataset_size = None\n instance = [dataset_name, int(row['seed']), row['architecture'], int(row['steps']), dataset_size]\n self.config[keyword][int(row['ID'])] = instance\n<|end_body_2|>\n\n<|body_start_3|>\n self.config = objdict(SGD_DEFAULTS.copy())\n if instance_set_path is not None:\n self.config['instance_set_path'] = instance_set_path\n self.config.seed = seed\n self.read_instance_set()\n return SGDEnv(self.config)\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000489", "length_bytes": 6789, "license_type": "permissive", "methods": [{"docstring": "Initialize SGD Benchmark Parameters ------- config_path : str Path to config file (optional)", "name": "__init__", "signature": "def __init__(self, config_path=None, config=None)"}, {"docstring": "Return SGDEnv env with current configuration Returns ------- SGDEnv SGD environment", "name": "get_environment", "signature": "def get_environment(self)"}, {"docstring": "Read path of instances from config into list", "name": "read_instance_set", "signature": "def read_instance_set(self, test=False)"}, {"docstring": "Get benchmark from the LTO paper Parameters ------- seed : int Environment seed Returns ------- env : SGDEnv SGD environment", "name": "get_benchmark", "signature": "def get_benchmark(self, instance_set_path=None, seed=0)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_004165", "prompt": "Implement the Python class `SGDBenchmark` described below.\n\nClass description:\nBenchmark with default configuration & relevant functions for SGD\n\nMethod signatures and docstrings:\n- def __init__(self, config_path=None, config=None): Initialize SGD Benchmark Parameters ------- config_path : str Path to config file (optional)\n- def get_environment(self): Return SGDEnv env with current configuration Returns ------- SGDEnv SGD environment\n- def read_instance_set(self, test=False): Read path of instances from config into list\n- def get_benchmark(self, instance_set_path=None, seed=0): Get benchmark from the LTO paper Parameters ------- seed : int Environment seed Returns ------- env : SGDEnv SGD environment", "prompted_full_text": "Implement the Python class `SGDBenchmark` described below.\n\nClass description:\nBenchmark with default configuration & relevant functions for SGD\n\nMethod signatures and docstrings:\n- def __init__(self, config_path=None, config=None): Initialize SGD Benchmark Parameters ------- config_path : str Path to config file (optional)\n- def get_environment(self): Return SGDEnv env with current configuration Returns ------- SGDEnv SGD environment\n- def read_instance_set(self, test=False): Read path of instances from config into list\n- def get_benchmark(self, instance_set_path=None, seed=0): Get benchmark from the LTO paper Parameters ------- seed : int Environment seed Returns ------- env : SGDEnv SGD environment\n\n<|skeleton|>\nclass SGDBenchmark:\n \"\"\"Benchmark with default configuration & relevant functions for SGD\"\"\"\n\n def __init__(self, config_path=None, config=None):\n \"\"\"Initialize SGD Benchmark Parameters ------- config_path : str Path to config file (optional)\"\"\"\n <|body_0|>\n\n def get_environment(self):\n \"\"\"Return SGDEnv env with current configuration Returns ------- SGDEnv SGD environment\"\"\"\n <|body_1|>\n\n def read_instance_set(self, test=False):\n \"\"\"Read path of instances from config into list\"\"\"\n <|body_2|>\n\n def get_benchmark(self, instance_set_path=None, seed=0):\n \"\"\"Get benchmark from the LTO paper Parameters ------- seed : int Environment seed Returns ------- env : SGDEnv SGD environment\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(SGDBenchmark, self).__init__(config_path, config)\n if not self.config:\n self.config = objdict(SGD_DEFAULTS.copy())\n for key in SGD_DEFAULTS:\n if key not in self.config:\n self.config[key] = SGD_DEFAULTS[key]\n<|end_body_0|>\n\n<|body_start_1|>\n if 'instance_set' not in self.config.keys():\n self.read_instance_set()\n if 'test_set' not in self.config.keys() and 'test_set_path' in self.config.keys():\n self.read_instance_set(test=True)\n env = SGDEnv(self.config)\n for func in self.wrap_funcs:\n env = func(env)\n return env\n<|end_body_1|>\n\n<|body_start_2|>\n if test:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.test_set_path\n keyword = 'test_set'\n else:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.instance_set_path\n keyword = 'instance_set'\n self.config[keyword] = {}\n with open(path, 'r') as fh:\n reader = csv.DictReader(fh, delimiter=';')\n for row in reader:\n if '_' in row['dataset']:\n dataset_info = row['dataset'].split('_')\n dataset_name = dataset_info[0]\n dataset_size = int(dataset_info[1])\n else:\n dataset_name = row['dataset']\n dataset_size = None\n instance = [dataset_name, int(row['seed']), row['architecture'], int(row['steps']), dataset_size]\n self.config[keyword][int(row['ID'])] = instance\n<|end_body_2|>\n\n<|body_start_3|>\n self.config = objdict(SGD_DEFAULTS.copy())\n if instance_set_path is not None:\n self.config['instance_set_path'] = instance_set_path\n self.config.seed = seed\n self.read_instance_set()\n return SGDEnv(self.config)\n<|end_body_3|>\n", "revision_id": "d99b21ec844a46d6e18e729ab299f8e9051a68e8", "skeleton": "<|skeleton|>\nclass SGDBenchmark:\n \"\"\"Benchmark with default configuration & relevant functions for SGD\"\"\"\n\n def __init__(self, config_path=None, config=None):\n \"\"\"Initialize SGD Benchmark Parameters ------- config_path : str Path to config file (optional)\"\"\"\n <|body_0|>\n\n def get_environment(self):\n \"\"\"Return SGDEnv env with current configuration Returns ------- SGDEnv SGD environment\"\"\"\n <|body_1|>\n\n def read_instance_set(self, test=False):\n \"\"\"Read path of instances from config into list\"\"\"\n <|body_2|>\n\n def get_benchmark(self, instance_set_path=None, seed=0):\n \"\"\"Get benchmark from the LTO paper Parameters ------- seed : int Environment seed Returns ------- env : SGDEnv SGD environment\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class SGDBenchmark:\n \"\"\"Benchmark with default configuration & relevant functions for SGD\"\"\"\n\n def __init__(self, config_path=None, config=None):\n \"\"\"Initialize SGD Benchmark Parameters ------- config_path : str Path to config file (optional)\"\"\"\n super(SGDBenchmark, self).__init__(config_path, config)\n if not self.config:\n self.config = objdict(SGD_DEFAULTS.copy())\n for key in SGD_DEFAULTS:\n if key not in self.config:\n self.config[key] = SGD_DEFAULTS[key]\n\n def get_environment(self):\n \"\"\"Return SGDEnv env with current configuration Returns ------- SGDEnv SGD environment\"\"\"\n if 'instance_set' not in self.config.keys():\n self.read_instance_set()\n if 'test_set' not in self.config.keys() and 'test_set_path' in self.config.keys():\n self.read_instance_set(test=True)\n env = SGDEnv(self.config)\n for func in self.wrap_funcs:\n env = func(env)\n return env\n\n def read_instance_set(self, test=False):\n \"\"\"Read path of instances from config into list\"\"\"\n if test:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.test_set_path\n keyword = 'test_set'\n else:\n path = os.path.dirname(os.path.abspath(__file__)) + '/' + self.config.instance_set_path\n keyword = 'instance_set'\n self.config[keyword] = {}\n with open(path, 'r') as fh:\n reader = csv.DictReader(fh, delimiter=';')\n for row in reader:\n if '_' in row['dataset']:\n dataset_info = row['dataset'].split('_')\n dataset_name = dataset_info[0]\n dataset_size = int(dataset_info[1])\n else:\n dataset_name = row['dataset']\n dataset_size = None\n instance = [dataset_name, int(row['seed']), row['architecture'], int(row['steps']), dataset_size]\n self.config[keyword][int(row['ID'])] = instance\n\n def get_benchmark(self, instance_set_path=None, seed=0):\n \"\"\"Get benchmark from the LTO paper Parameters ------- seed : int Environment seed Returns ------- env : SGDEnv SGD environment\"\"\"\n self.config = objdict(SGD_DEFAULTS.copy())\n if instance_set_path is not None:\n self.config['instance_set_path'] = instance_set_path\n self.config.seed = seed\n self.read_instance_set()\n return SGDEnv(self.config)\n", "source": "the_stack_v2_python_sparse", "source_path": "dacbench/benchmarks/sgd_benchmark.py", "source_repo": "automl/DACBench", "split": "test", "star_events_count": 19} {"blob_id": "706e065d5a7f1fe0b5b92beff9432613340340a9", "bodies": ["keyword = '%student%'\ndata = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue'][1:-1]\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nself.assertEqual(res.status_code, 200, '查询失败')\nquery_results = dict_res(query_results['content'][0])\nquery_result_name = query_results['name']\nself.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')", "data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nquery_results = dict_res(query_results['content'][0])\nquery_result_flowType = query_results['flowType']\nself.assertEqual(res.status_code, 200, '查询失败')\nself.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')", "data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nself.assertEqual(res.status_code, 200, '查询失败')", "data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nfieldValue = data['fieldList'][0]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nself.assertEqual(res.status_code, 200, '查询失败')", "data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\ndata_name = data['fieldList'][0]['fieldValue'][1:-1]\ndata_flowType = data['fieldList'][1]['fieldValue']\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nself.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)", "end_time = get_time()\nstart_time = get_time() - 10 * 24 * 3600 * 1000\ndata = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\nres = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\nquery_results = dict_res(res.text)\nfirst_Time = query_results['content'][0]['lastModifiedTime']\nself.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')"], "bodies_text": "<|body_start_0|>\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n<|end_body_0|>\n\n<|body_start_1|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n<|end_body_1|>\n\n<|body_start_2|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_2|>\n\n<|body_start_3|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_3|>\n\n<|body_start_4|>\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n<|end_body_4|>\n\n<|body_start_5|>\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n<|end_body_5|>\n", "class_docstring": "测试查询schedulers接口 /api/schedulers/query", "class_name": "QuerySchedulers", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n<|end_body_0|>\n\n<|body_start_1|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n<|end_body_1|>\n\n<|body_start_2|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_2|>\n\n<|body_start_3|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_3|>\n\n<|body_start_4|>\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n<|end_body_4|>\n\n<|body_start_5|>\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n<|end_body_5|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000490", "length_bytes": 15511, "license_type": "no_license", "methods": [{"docstring": "根据scheduler name模糊查询", "name": "test_case01", "signature": "def test_case01(self)"}, {"docstring": "根据flowtype-dataflow查询", "name": "test_case02", "signature": "def test_case02(self)"}, {"docstring": "根据flowtype-workflow查询", "name": "test_case03", "signature": "def test_case03(self)"}, {"docstring": "根据flowtype-streamflow查询", "name": "test_case04", "signature": "def test_case04(self)"}, {"docstring": "flowtype+name组合查询scheduler", "name": "test_case05", "signature": "def test_case05(self)"}, {"docstring": "query:根据上次修改时间查询全部的scheduler", "name": "test_case06", "signature": "def test_case06(self)"}], "n_methods": 6, "original_id": "stack_v2_sparse_classes_30k_val_000538", "prompt": "Implement the Python class `QuerySchedulers` described below.\n\nClass description:\n测试查询schedulers接口 /api/schedulers/query\n\nMethod signatures and docstrings:\n- def test_case01(self): 根据scheduler name模糊查询\n- def test_case02(self): 根据flowtype-dataflow查询\n- def test_case03(self): 根据flowtype-workflow查询\n- def test_case04(self): 根据flowtype-streamflow查询\n- def test_case05(self): flowtype+name组合查询scheduler\n- def test_case06(self): query:根据上次修改时间查询全部的scheduler", "prompted_full_text": "Implement the Python class `QuerySchedulers` described below.\n\nClass description:\n测试查询schedulers接口 /api/schedulers/query\n\nMethod signatures and docstrings:\n- def test_case01(self): 根据scheduler name模糊查询\n- def test_case02(self): 根据flowtype-dataflow查询\n- def test_case03(self): 根据flowtype-workflow查询\n- def test_case04(self): 根据flowtype-streamflow查询\n- def test_case05(self): flowtype+name组合查询scheduler\n- def test_case06(self): query:根据上次修改时间查询全部的scheduler\n\n<|skeleton|>\nclass QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n <|body_5|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n<|end_body_0|>\n\n<|body_start_1|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n<|end_body_1|>\n\n<|body_start_2|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_2|>\n\n<|body_start_3|>\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n<|end_body_3|>\n\n<|body_start_4|>\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n<|end_body_4|>\n\n<|body_start_5|>\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n<|end_body_5|>\n", "revision_id": "fc41513af3063169ff1b17d6f01f7074057ceb1f", "skeleton": "<|skeleton|>\nclass QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n <|body_0|>\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n <|body_1|>\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n <|body_2|>\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n <|body_3|>\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n <|body_4|>\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n <|body_5|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QuerySchedulers:\n \"\"\"测试查询schedulers接口 /api/schedulers/query\"\"\"\n\n def test_case01(self):\n \"\"\"根据scheduler name模糊查询\"\"\"\n keyword = '%student%'\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': keyword, 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue'][1:-1]\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n query_results = dict_res(query_results['content'][0])\n query_result_name = query_results['name']\n self.assertIn(fieldValue, query_result_name, '查询结果中scheduler的name和查询关键词name不一致')\n\n def test_case02(self):\n \"\"\"根据flowtype-dataflow查询\"\"\"\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'dataflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n query_results = dict_res(query_results['content'][0])\n query_result_flowType = query_results['flowType']\n self.assertEqual(res.status_code, 200, '查询失败')\n self.assertEqual(fieldValue, query_result_flowType, '查询结果中scheduler关联flowtype和查询关键词flowType不一致')\n\n def test_case03(self):\n \"\"\"根据flowtype-workflow查询\"\"\"\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(res.status_code, 200, '查询失败')\n\n def test_case04(self):\n \"\"\"根据flowtype-streamflow查询\"\"\"\n data = {'fieldList': [{'fieldName': 'flowType', 'fieldValue': 'streamflow', 'comparatorOperator': 'LIKE'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n fieldValue = data['fieldList'][0]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n self.assertEqual(res.status_code, 200, '查询失败')\n\n def test_case05(self):\n \"\"\"flowtype+name组合查询scheduler\"\"\"\n data = {'fieldList': [{'fieldName': 'name', 'fieldValue': '%gbj%', 'comparatorOperator': 'LIKE'}, {'fieldName': 'flowType', 'fieldValue': 'workflow', 'comparatorOperator': 'EQUAL'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n data_name = data['fieldList'][0]['fieldValue'][1:-1]\n data_flowType = data['fieldList'][1]['fieldValue']\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n self.assertEqual(200, res.status_code, 'flowtype+name组合查询scheduler失败:%s' % res.text)\n\n def test_case06(self):\n \"\"\"query:根据上次修改时间查询全部的scheduler\"\"\"\n end_time = get_time()\n start_time = get_time() - 10 * 24 * 3600 * 1000\n data = {'fieldList': [{'fieldName': 'lastModifiedTime', 'fieldValue': start_time, 'comparatorOperator': 'GREATER_THAN'}, {'fieldName': 'lastModifiedTime', 'fieldValue': end_time, 'comparatorOperator': 'LESS_THAN'}], 'sortObject': {'field': 'lastModifiedTime', 'orderDirection': 'DESC'}, 'offset': 0, 'limit': 8}\n res = requests.post(url=self.query_scheduler_url, headers=get_headers(HOST_189), data=json.dumps(data))\n query_results = dict_res(res.text)\n first_Time = query_results['content'][0]['lastModifiedTime']\n self.assertEqual(end_time > first_Time > start_time, True, '查询结果的lastModifiedTime不包含在起始时间内,查询结果不正确')\n", "source": "the_stack_v2_python_sparse", "source_path": "singl_api/api_test_cases/cases_for_schedulers_api.py", "source_repo": "bingjiegu/For_API", "split": "test", "star_events_count": 0} {"blob_id": "8c9532d2bab77dbb2cac80ed5da551c1d1982b5d", "bodies": ["panels = []\nfor plug in registry.with_mixin('panel', active=True):\n try:\n panels += plug.render_panels(self, self.request, ctx)\n except Exception:\n log_error(self.request.path)\n logger.error(f\"Plugin '{plug.slug}' could not render custom panels at '{self.request.path}'\")\nreturn panels", "ctx = super().get_context_data(**kwargs)\nif settings.PLUGINS_ENABLED:\n ctx['plugin_panels'] = self.get_plugin_panels(ctx)\nreturn ctx"], "bodies_text": "<|body_start_0|>\n panels = []\n for plug in registry.with_mixin('panel', active=True):\n try:\n panels += plug.render_panels(self, self.request, ctx)\n except Exception:\n log_error(self.request.path)\n logger.error(f\"Plugin '{plug.slug}' could not render custom panels at '{self.request.path}'\")\n return panels\n<|end_body_0|>\n\n<|body_start_1|>\n ctx = super().get_context_data(**kwargs)\n if settings.PLUGINS_ENABLED:\n ctx['plugin_panels'] = self.get_plugin_panels(ctx)\n return ctx\n<|end_body_1|>\n", "class_docstring": "Custom view mixin which adds context data to the view, based on loaded plugins. This allows rendered pages to be augmented by loaded plugins.", "class_name": "InvenTreePluginViewMixin", "detected_licenses": ["MIT", "LicenseRef-scancode-unknown-license-reference"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass InvenTreePluginViewMixin:\n \"\"\"Custom view mixin which adds context data to the view, based on loaded plugins. This allows rendered pages to be augmented by loaded plugins.\"\"\"\n\n def get_plugin_panels(self, ctx):\n \"\"\"Return a list of extra 'plugin panels' associated with this view.\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add plugin context data to the view.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n panels = []\n for plug in registry.with_mixin('panel', active=True):\n try:\n panels += plug.render_panels(self, self.request, ctx)\n except Exception:\n log_error(self.request.path)\n logger.error(f\"Plugin '{plug.slug}' could not render custom panels at '{self.request.path}'\")\n return panels\n<|end_body_0|>\n\n<|body_start_1|>\n ctx = super().get_context_data(**kwargs)\n if settings.PLUGINS_ENABLED:\n ctx['plugin_panels'] = self.get_plugin_panels(ctx)\n return ctx\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000491", "length_bytes": 1207, "license_type": "permissive", "methods": [{"docstring": "Return a list of extra 'plugin panels' associated with this view.", "name": "get_plugin_panels", "signature": "def get_plugin_panels(self, ctx)"}, {"docstring": "Add plugin context data to the view.", "name": "get_context_data", "signature": "def get_context_data(self, **kwargs)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_011395", "prompt": "Implement the Python class `InvenTreePluginViewMixin` described below.\n\nClass description:\nCustom view mixin which adds context data to the view, based on loaded plugins. This allows rendered pages to be augmented by loaded plugins.\n\nMethod signatures and docstrings:\n- def get_plugin_panels(self, ctx): Return a list of extra 'plugin panels' associated with this view.\n- def get_context_data(self, **kwargs): Add plugin context data to the view.", "prompted_full_text": "Implement the Python class `InvenTreePluginViewMixin` described below.\n\nClass description:\nCustom view mixin which adds context data to the view, based on loaded plugins. This allows rendered pages to be augmented by loaded plugins.\n\nMethod signatures and docstrings:\n- def get_plugin_panels(self, ctx): Return a list of extra 'plugin panels' associated with this view.\n- def get_context_data(self, **kwargs): Add plugin context data to the view.\n\n<|skeleton|>\nclass InvenTreePluginViewMixin:\n \"\"\"Custom view mixin which adds context data to the view, based on loaded plugins. This allows rendered pages to be augmented by loaded plugins.\"\"\"\n\n def get_plugin_panels(self, ctx):\n \"\"\"Return a list of extra 'plugin panels' associated with this view.\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add plugin context data to the view.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n panels = []\n for plug in registry.with_mixin('panel', active=True):\n try:\n panels += plug.render_panels(self, self.request, ctx)\n except Exception:\n log_error(self.request.path)\n logger.error(f\"Plugin '{plug.slug}' could not render custom panels at '{self.request.path}'\")\n return panels\n<|end_body_0|>\n\n<|body_start_1|>\n ctx = super().get_context_data(**kwargs)\n if settings.PLUGINS_ENABLED:\n ctx['plugin_panels'] = self.get_plugin_panels(ctx)\n return ctx\n<|end_body_1|>\n", "revision_id": "e88a8e99a5f0b201c67a95cba097c729f090d5e2", "skeleton": "<|skeleton|>\nclass InvenTreePluginViewMixin:\n \"\"\"Custom view mixin which adds context data to the view, based on loaded plugins. This allows rendered pages to be augmented by loaded plugins.\"\"\"\n\n def get_plugin_panels(self, ctx):\n \"\"\"Return a list of extra 'plugin panels' associated with this view.\"\"\"\n <|body_0|>\n\n def get_context_data(self, **kwargs):\n \"\"\"Add plugin context data to the view.\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class InvenTreePluginViewMixin:\n \"\"\"Custom view mixin which adds context data to the view, based on loaded plugins. This allows rendered pages to be augmented by loaded plugins.\"\"\"\n\n def get_plugin_panels(self, ctx):\n \"\"\"Return a list of extra 'plugin panels' associated with this view.\"\"\"\n panels = []\n for plug in registry.with_mixin('panel', active=True):\n try:\n panels += plug.render_panels(self, self.request, ctx)\n except Exception:\n log_error(self.request.path)\n logger.error(f\"Plugin '{plug.slug}' could not render custom panels at '{self.request.path}'\")\n return panels\n\n def get_context_data(self, **kwargs):\n \"\"\"Add plugin context data to the view.\"\"\"\n ctx = super().get_context_data(**kwargs)\n if settings.PLUGINS_ENABLED:\n ctx['plugin_panels'] = self.get_plugin_panels(ctx)\n return ctx\n", "source": "the_stack_v2_python_sparse", "source_path": "InvenTree/plugin/views.py", "source_repo": "inventree/InvenTree", "split": "test", "star_events_count": 3077} {"blob_id": "09494c3ae0eeed60772ebbe50e1a297025dad9c5", "bodies": ["self.settings = settings\nself.database = database\nself.station = station\nself.group = group\nself.table_reader: TableReader = None\nself.status = logging.INFO\nfilename = self.settings['values_input_file'] % {'station': self.station[STATION_SHORT], 'riege': self.group[GROUP_NAME]}\nTable.__init__(self, competition_folder, filename)", "if not Table.open(self):\n self.status = logging.ERROR\n return False\ntry:\n reader = TableReader.from_settings(self, self.settings, 'values_input')\n if not reader.read(self.process_row):\n self.status = logging.ERROR\nexcept:\n logging.exception('Fehler beim Auslesen der Daten aus der Tabelle %s', self.filename)\n self.status = logging.ERROR\n return False\nreturn True", "attendee = self.database.filter_attendee(row_data, self.settings['values_input_required'])\nif not attendee:\n logging.warning('Zeile %d der Datei %s konnte nicht eindeutig zugewiesen werden.', row, self.filename)\n self.status = logging.WARNING\n return\nfor key, value in row_data.items():\n if key in self.station[STATION_COLUMNS]:\n attendee[key] = value"], "bodies_text": "<|body_start_0|>\n self.settings = settings\n self.database = database\n self.station = station\n self.group = group\n self.table_reader: TableReader = None\n self.status = logging.INFO\n filename = self.settings['values_input_file'] % {'station': self.station[STATION_SHORT], 'riege': self.group[GROUP_NAME]}\n Table.__init__(self, competition_folder, filename)\n<|end_body_0|>\n\n<|body_start_1|>\n if not Table.open(self):\n self.status = logging.ERROR\n return False\n try:\n reader = TableReader.from_settings(self, self.settings, 'values_input')\n if not reader.read(self.process_row):\n self.status = logging.ERROR\n except:\n logging.exception('Fehler beim Auslesen der Daten aus der Tabelle %s', self.filename)\n self.status = logging.ERROR\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n attendee = self.database.filter_attendee(row_data, self.settings['values_input_required'])\n if not attendee:\n logging.warning('Zeile %d der Datei %s konnte nicht eindeutig zugewiesen werden.', row, self.filename)\n self.status = logging.WARNING\n return\n for key, value in row_data.items():\n if key in self.station[STATION_COLUMNS]:\n attendee[key] = value\n<|end_body_2|>\n", "class_docstring": "Klasse zum Einlesen der Wertetabellen", "class_name": "ValuesInputTable", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ValuesInputTable:\n \"\"\"Klasse zum Einlesen der Wertetabellen\"\"\"\n\n def __init__(self, competition_folder: str, settings: SettingsTable, database: Database, station, group):\n \"\"\"Konstruktor. Args: competition_folder (str): Ordner der Veranstaltung settings (SettingsTable): Einstellungen database (Database): Datenbank station (typing.Dict): Station group (typing.Dict): Riege\"\"\"\n <|body_0|>\n\n def open(self) -> bool:\n \"\"\"Öffnen der Tabelle und Auslesen der Werte Returns: bool: True, wenn erfolgreich\"\"\"\n <|body_1|>\n\n def process_row(self, row: int, row_data: typing.Dict[str, typing.Any]):\n \"\"\"Methode zum Auslesen einer Zeile Args: row (int): Zeilennummer row_data (typing.Dict[str, typing.Any]): Daten der Zeile\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.settings = settings\n self.database = database\n self.station = station\n self.group = group\n self.table_reader: TableReader = None\n self.status = logging.INFO\n filename = self.settings['values_input_file'] % {'station': self.station[STATION_SHORT], 'riege': self.group[GROUP_NAME]}\n Table.__init__(self, competition_folder, filename)\n<|end_body_0|>\n\n<|body_start_1|>\n if not Table.open(self):\n self.status = logging.ERROR\n return False\n try:\n reader = TableReader.from_settings(self, self.settings, 'values_input')\n if not reader.read(self.process_row):\n self.status = logging.ERROR\n except:\n logging.exception('Fehler beim Auslesen der Daten aus der Tabelle %s', self.filename)\n self.status = logging.ERROR\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n attendee = self.database.filter_attendee(row_data, self.settings['values_input_required'])\n if not attendee:\n logging.warning('Zeile %d der Datei %s konnte nicht eindeutig zugewiesen werden.', row, self.filename)\n self.status = logging.WARNING\n return\n for key, value in row_data.items():\n if key in self.station[STATION_COLUMNS]:\n attendee[key] = value\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000492", "length_bytes": 2806, "license_type": "no_license", "methods": [{"docstring": "Konstruktor. Args: competition_folder (str): Ordner der Veranstaltung settings (SettingsTable): Einstellungen database (Database): Datenbank station (typing.Dict): Station group (typing.Dict): Riege", "name": "__init__", "signature": "def __init__(self, competition_folder: str, settings: SettingsTable, database: Database, station, group)"}, {"docstring": "Öffnen der Tabelle und Auslesen der Werte Returns: bool: True, wenn erfolgreich", "name": "open", "signature": "def open(self) -> bool"}, {"docstring": "Methode zum Auslesen einer Zeile Args: row (int): Zeilennummer row_data (typing.Dict[str, typing.Any]): Daten der Zeile", "name": "process_row", "signature": "def process_row(self, row: int, row_data: typing.Dict[str, typing.Any])"}], "n_methods": 3, "prompt": "Implement the Python class `ValuesInputTable` described below.\n\nClass description:\nKlasse zum Einlesen der Wertetabellen\n\nMethod signatures and docstrings:\n- def __init__(self, competition_folder: str, settings: SettingsTable, database: Database, station, group): Konstruktor. Args: competition_folder (str): Ordner der Veranstaltung settings (SettingsTable): Einstellungen database (Database): Datenbank station (typing.Dict): Station group (typing.Dict): Riege\n- def open(self) -> bool: Öffnen der Tabelle und Auslesen der Werte Returns: bool: True, wenn erfolgreich\n- def process_row(self, row: int, row_data: typing.Dict[str, typing.Any]): Methode zum Auslesen einer Zeile Args: row (int): Zeilennummer row_data (typing.Dict[str, typing.Any]): Daten der Zeile", "prompted_full_text": "Implement the Python class `ValuesInputTable` described below.\n\nClass description:\nKlasse zum Einlesen der Wertetabellen\n\nMethod signatures and docstrings:\n- def __init__(self, competition_folder: str, settings: SettingsTable, database: Database, station, group): Konstruktor. Args: competition_folder (str): Ordner der Veranstaltung settings (SettingsTable): Einstellungen database (Database): Datenbank station (typing.Dict): Station group (typing.Dict): Riege\n- def open(self) -> bool: Öffnen der Tabelle und Auslesen der Werte Returns: bool: True, wenn erfolgreich\n- def process_row(self, row: int, row_data: typing.Dict[str, typing.Any]): Methode zum Auslesen einer Zeile Args: row (int): Zeilennummer row_data (typing.Dict[str, typing.Any]): Daten der Zeile\n\n<|skeleton|>\nclass ValuesInputTable:\n \"\"\"Klasse zum Einlesen der Wertetabellen\"\"\"\n\n def __init__(self, competition_folder: str, settings: SettingsTable, database: Database, station, group):\n \"\"\"Konstruktor. Args: competition_folder (str): Ordner der Veranstaltung settings (SettingsTable): Einstellungen database (Database): Datenbank station (typing.Dict): Station group (typing.Dict): Riege\"\"\"\n <|body_0|>\n\n def open(self) -> bool:\n \"\"\"Öffnen der Tabelle und Auslesen der Werte Returns: bool: True, wenn erfolgreich\"\"\"\n <|body_1|>\n\n def process_row(self, row: int, row_data: typing.Dict[str, typing.Any]):\n \"\"\"Methode zum Auslesen einer Zeile Args: row (int): Zeilennummer row_data (typing.Dict[str, typing.Any]): Daten der Zeile\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.settings = settings\n self.database = database\n self.station = station\n self.group = group\n self.table_reader: TableReader = None\n self.status = logging.INFO\n filename = self.settings['values_input_file'] % {'station': self.station[STATION_SHORT], 'riege': self.group[GROUP_NAME]}\n Table.__init__(self, competition_folder, filename)\n<|end_body_0|>\n\n<|body_start_1|>\n if not Table.open(self):\n self.status = logging.ERROR\n return False\n try:\n reader = TableReader.from_settings(self, self.settings, 'values_input')\n if not reader.read(self.process_row):\n self.status = logging.ERROR\n except:\n logging.exception('Fehler beim Auslesen der Daten aus der Tabelle %s', self.filename)\n self.status = logging.ERROR\n return False\n return True\n<|end_body_1|>\n\n<|body_start_2|>\n attendee = self.database.filter_attendee(row_data, self.settings['values_input_required'])\n if not attendee:\n logging.warning('Zeile %d der Datei %s konnte nicht eindeutig zugewiesen werden.', row, self.filename)\n self.status = logging.WARNING\n return\n for key, value in row_data.items():\n if key in self.station[STATION_COLUMNS]:\n attendee[key] = value\n<|end_body_2|>\n", "revision_id": "349aad3f5a71374f062a7a3b50d827dbf8e99bfe", "skeleton": "<|skeleton|>\nclass ValuesInputTable:\n \"\"\"Klasse zum Einlesen der Wertetabellen\"\"\"\n\n def __init__(self, competition_folder: str, settings: SettingsTable, database: Database, station, group):\n \"\"\"Konstruktor. Args: competition_folder (str): Ordner der Veranstaltung settings (SettingsTable): Einstellungen database (Database): Datenbank station (typing.Dict): Station group (typing.Dict): Riege\"\"\"\n <|body_0|>\n\n def open(self) -> bool:\n \"\"\"Öffnen der Tabelle und Auslesen der Werte Returns: bool: True, wenn erfolgreich\"\"\"\n <|body_1|>\n\n def process_row(self, row: int, row_data: typing.Dict[str, typing.Any]):\n \"\"\"Methode zum Auslesen einer Zeile Args: row (int): Zeilennummer row_data (typing.Dict[str, typing.Any]): Daten der Zeile\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ValuesInputTable:\n \"\"\"Klasse zum Einlesen der Wertetabellen\"\"\"\n\n def __init__(self, competition_folder: str, settings: SettingsTable, database: Database, station, group):\n \"\"\"Konstruktor. Args: competition_folder (str): Ordner der Veranstaltung settings (SettingsTable): Einstellungen database (Database): Datenbank station (typing.Dict): Station group (typing.Dict): Riege\"\"\"\n self.settings = settings\n self.database = database\n self.station = station\n self.group = group\n self.table_reader: TableReader = None\n self.status = logging.INFO\n filename = self.settings['values_input_file'] % {'station': self.station[STATION_SHORT], 'riege': self.group[GROUP_NAME]}\n Table.__init__(self, competition_folder, filename)\n\n def open(self) -> bool:\n \"\"\"Öffnen der Tabelle und Auslesen der Werte Returns: bool: True, wenn erfolgreich\"\"\"\n if not Table.open(self):\n self.status = logging.ERROR\n return False\n try:\n reader = TableReader.from_settings(self, self.settings, 'values_input')\n if not reader.read(self.process_row):\n self.status = logging.ERROR\n except:\n logging.exception('Fehler beim Auslesen der Daten aus der Tabelle %s', self.filename)\n self.status = logging.ERROR\n return False\n return True\n\n def process_row(self, row: int, row_data: typing.Dict[str, typing.Any]):\n \"\"\"Methode zum Auslesen einer Zeile Args: row (int): Zeilennummer row_data (typing.Dict[str, typing.Any]): Daten der Zeile\"\"\"\n attendee = self.database.filter_attendee(row_data, self.settings['values_input_required'])\n if not attendee:\n logging.warning('Zeile %d der Datei %s konnte nicht eindeutig zugewiesen werden.', row, self.filename)\n self.status = logging.WARNING\n return\n for key, value in row_data.items():\n if key in self.station[STATION_COLUMNS]:\n attendee[key] = value\n", "source": "the_stack_v2_python_sparse", "source_path": "scoring/values_input_table.py", "source_repo": "RobFro96/Talentiadeverwaltung", "split": "test", "star_events_count": 0} {"blob_id": "ef16fba0f02c4509ebc0d2daf8aa18f1d9a09001", "bodies": ["course_module = obj.module\nvalue = super(QuestionSerializer, self).to_representation(obj)\nvalue['type'] = obj.__class__.__name__\nuser = self.context['request'].user\nvalue['progress'] = []\nanswered_question_before = True\nfor course_module in obj.module.course.module_set.all():\n module_set = []\n for question in course_module.question_set.all():\n if answered_question_before and question.try_set.filter(solved=True, user=user).exists():\n module_set.append({'solved': True, 'title': question.title})\n else:\n answered_question_before = False\n module_set.append({'solved': False, 'title': question.title})\n value['progress'].append(module_set)\nvalue['last_question'] = obj.is_last_question()\nvalue['last_module'] = course_module.is_last_module()\nvalue['learning_text'] = course_module.learning_text\nserializer = obj.get_serializer()\nvalue['question_body'] = serializer(obj).data\nvalue['solved'] = obj.try_set.filter(solved=True, user=user).exists()\nreturn value", "question_type = validated_data.pop('type')\nif question_type == 'multiple_choice':\n MultipleChoiceQuestionSerializer().create(validated_data)\nelif question_type == 'info_text':\n InformationTextSerializer().create(validated_data)\nelif question_type == 'info_text_youtube':\n InformationYoutubeSerializer().create(validated_data)\nelse:\n raise ParseError(detail='{} is not a valid question type'.format(question_type))"], "bodies_text": "<|body_start_0|>\n course_module = obj.module\n value = super(QuestionSerializer, self).to_representation(obj)\n value['type'] = obj.__class__.__name__\n user = self.context['request'].user\n value['progress'] = []\n answered_question_before = True\n for course_module in obj.module.course.module_set.all():\n module_set = []\n for question in course_module.question_set.all():\n if answered_question_before and question.try_set.filter(solved=True, user=user).exists():\n module_set.append({'solved': True, 'title': question.title})\n else:\n answered_question_before = False\n module_set.append({'solved': False, 'title': question.title})\n value['progress'].append(module_set)\n value['last_question'] = obj.is_last_question()\n value['last_module'] = course_module.is_last_module()\n value['learning_text'] = course_module.learning_text\n serializer = obj.get_serializer()\n value['question_body'] = serializer(obj).data\n value['solved'] = obj.try_set.filter(solved=True, user=user).exists()\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n question_type = validated_data.pop('type')\n if question_type == 'multiple_choice':\n MultipleChoiceQuestionSerializer().create(validated_data)\n elif question_type == 'info_text':\n InformationTextSerializer().create(validated_data)\n elif question_type == 'info_text_youtube':\n InformationYoutubeSerializer().create(validated_data)\n else:\n raise ParseError(detail='{} is not a valid question type'.format(question_type))\n<|end_body_1|>\n", "class_docstring": "The serializer responsible for the Question object :author: Claas Voelcker", "class_name": "QuestionSerializer", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass QuestionSerializer:\n \"\"\"The serializer responsible for the Question object :author: Claas Voelcker\"\"\"\n\n def to_representation(self, obj):\n \"\"\"Appends additional information to the model. :param: obj: The object that should be serialized (Question) :return: value: a valid json object containing all required fields\"\"\"\n <|body_0|>\n\n def create(self, validated_data):\n \"\"\"Serializer that governs the dispatch to specific class serializers :param validated_data: the data to be serialized :return: serialized representation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n course_module = obj.module\n value = super(QuestionSerializer, self).to_representation(obj)\n value['type'] = obj.__class__.__name__\n user = self.context['request'].user\n value['progress'] = []\n answered_question_before = True\n for course_module in obj.module.course.module_set.all():\n module_set = []\n for question in course_module.question_set.all():\n if answered_question_before and question.try_set.filter(solved=True, user=user).exists():\n module_set.append({'solved': True, 'title': question.title})\n else:\n answered_question_before = False\n module_set.append({'solved': False, 'title': question.title})\n value['progress'].append(module_set)\n value['last_question'] = obj.is_last_question()\n value['last_module'] = course_module.is_last_module()\n value['learning_text'] = course_module.learning_text\n serializer = obj.get_serializer()\n value['question_body'] = serializer(obj).data\n value['solved'] = obj.try_set.filter(solved=True, user=user).exists()\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n question_type = validated_data.pop('type')\n if question_type == 'multiple_choice':\n MultipleChoiceQuestionSerializer().create(validated_data)\n elif question_type == 'info_text':\n InformationTextSerializer().create(validated_data)\n elif question_type == 'info_text_youtube':\n InformationYoutubeSerializer().create(validated_data)\n else:\n raise ParseError(detail='{} is not a valid question type'.format(question_type))\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000493", "length_bytes": 20775, "license_type": "no_license", "methods": [{"docstring": "Appends additional information to the model. :param: obj: The object that should be serialized (Question) :return: value: a valid json object containing all required fields", "name": "to_representation", "signature": "def to_representation(self, obj)"}, {"docstring": "Serializer that governs the dispatch to specific class serializers :param validated_data: the data to be serialized :return: serialized representation", "name": "create", "signature": "def create(self, validated_data)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_013434", "prompt": "Implement the Python class `QuestionSerializer` described below.\n\nClass description:\nThe serializer responsible for the Question object :author: Claas Voelcker\n\nMethod signatures and docstrings:\n- def to_representation(self, obj): Appends additional information to the model. :param: obj: The object that should be serialized (Question) :return: value: a valid json object containing all required fields\n- def create(self, validated_data): Serializer that governs the dispatch to specific class serializers :param validated_data: the data to be serialized :return: serialized representation", "prompted_full_text": "Implement the Python class `QuestionSerializer` described below.\n\nClass description:\nThe serializer responsible for the Question object :author: Claas Voelcker\n\nMethod signatures and docstrings:\n- def to_representation(self, obj): Appends additional information to the model. :param: obj: The object that should be serialized (Question) :return: value: a valid json object containing all required fields\n- def create(self, validated_data): Serializer that governs the dispatch to specific class serializers :param validated_data: the data to be serialized :return: serialized representation\n\n<|skeleton|>\nclass QuestionSerializer:\n \"\"\"The serializer responsible for the Question object :author: Claas Voelcker\"\"\"\n\n def to_representation(self, obj):\n \"\"\"Appends additional information to the model. :param: obj: The object that should be serialized (Question) :return: value: a valid json object containing all required fields\"\"\"\n <|body_0|>\n\n def create(self, validated_data):\n \"\"\"Serializer that governs the dispatch to specific class serializers :param validated_data: the data to be serialized :return: serialized representation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n course_module = obj.module\n value = super(QuestionSerializer, self).to_representation(obj)\n value['type'] = obj.__class__.__name__\n user = self.context['request'].user\n value['progress'] = []\n answered_question_before = True\n for course_module in obj.module.course.module_set.all():\n module_set = []\n for question in course_module.question_set.all():\n if answered_question_before and question.try_set.filter(solved=True, user=user).exists():\n module_set.append({'solved': True, 'title': question.title})\n else:\n answered_question_before = False\n module_set.append({'solved': False, 'title': question.title})\n value['progress'].append(module_set)\n value['last_question'] = obj.is_last_question()\n value['last_module'] = course_module.is_last_module()\n value['learning_text'] = course_module.learning_text\n serializer = obj.get_serializer()\n value['question_body'] = serializer(obj).data\n value['solved'] = obj.try_set.filter(solved=True, user=user).exists()\n return value\n<|end_body_0|>\n\n<|body_start_1|>\n question_type = validated_data.pop('type')\n if question_type == 'multiple_choice':\n MultipleChoiceQuestionSerializer().create(validated_data)\n elif question_type == 'info_text':\n InformationTextSerializer().create(validated_data)\n elif question_type == 'info_text_youtube':\n InformationYoutubeSerializer().create(validated_data)\n else:\n raise ParseError(detail='{} is not a valid question type'.format(question_type))\n<|end_body_1|>\n", "revision_id": "6fc102c6841449dd9782183b22340d1f1cb7c5c4", "skeleton": "<|skeleton|>\nclass QuestionSerializer:\n \"\"\"The serializer responsible for the Question object :author: Claas Voelcker\"\"\"\n\n def to_representation(self, obj):\n \"\"\"Appends additional information to the model. :param: obj: The object that should be serialized (Question) :return: value: a valid json object containing all required fields\"\"\"\n <|body_0|>\n\n def create(self, validated_data):\n \"\"\"Serializer that governs the dispatch to specific class serializers :param validated_data: the data to be serialized :return: serialized representation\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class QuestionSerializer:\n \"\"\"The serializer responsible for the Question object :author: Claas Voelcker\"\"\"\n\n def to_representation(self, obj):\n \"\"\"Appends additional information to the model. :param: obj: The object that should be serialized (Question) :return: value: a valid json object containing all required fields\"\"\"\n course_module = obj.module\n value = super(QuestionSerializer, self).to_representation(obj)\n value['type'] = obj.__class__.__name__\n user = self.context['request'].user\n value['progress'] = []\n answered_question_before = True\n for course_module in obj.module.course.module_set.all():\n module_set = []\n for question in course_module.question_set.all():\n if answered_question_before and question.try_set.filter(solved=True, user=user).exists():\n module_set.append({'solved': True, 'title': question.title})\n else:\n answered_question_before = False\n module_set.append({'solved': False, 'title': question.title})\n value['progress'].append(module_set)\n value['last_question'] = obj.is_last_question()\n value['last_module'] = course_module.is_last_module()\n value['learning_text'] = course_module.learning_text\n serializer = obj.get_serializer()\n value['question_body'] = serializer(obj).data\n value['solved'] = obj.try_set.filter(solved=True, user=user).exists()\n return value\n\n def create(self, validated_data):\n \"\"\"Serializer that governs the dispatch to specific class serializers :param validated_data: the data to be serialized :return: serialized representation\"\"\"\n question_type = validated_data.pop('type')\n if question_type == 'multiple_choice':\n MultipleChoiceQuestionSerializer().create(validated_data)\n elif question_type == 'info_text':\n InformationTextSerializer().create(validated_data)\n elif question_type == 'info_text_youtube':\n InformationYoutubeSerializer().create(validated_data)\n else:\n raise ParseError(detail='{} is not a valid question type'.format(question_type))\n", "source": "the_stack_v2_python_sparse", "source_path": "django/learning_base/serializers.py", "source_repo": "cvoelcker/clonecademy", "split": "test", "star_events_count": 2} {"blob_id": "26aa6ffd7f143841d81f67f3793b710fa8c88694", "bodies": ["super(MLP, self).__init__()\nself.f = activation_function\nself.fc_first = nn.Linear(input_size, hidden_dim)\nself.layers = []\nfor i in range(num_layers):\n self.layers.append(nn.Linear(hidden_dim, hidden_dim))\nself.fc_last = nn.Linear(hidden_dim, 1)\nself.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\nself.to(self.device)\nfor i in range(len(self.layers)):\n self.layers[i].to(self.device)\nself.criterion = nn.BCELoss()\nself.optimizer = optim.SGD(self.parameters(), lr=learning_rate, momentum=momentum)\nself.scheduler = StepLR(self.optimizer, step_size=1, gamma=decay_factor)", "x = self.f(self.fc_first(x))\nfor i in range(len(self.layers)):\n x = self.f(self.layers[i](x))\nx = torch.sigmoid(self.fc_last(x))\nreturn x", "trainset = utils.TensorDataset(X_train, y_train)\ntrainloader = utils.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)\nfor epoch in range(epochs):\n print('epoch:', epoch, 'learning rate:', self.scheduler.get_lr())\n running_loss = 0.0\n for j, data in enumerate(trainloader, 0):\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n self.optimizer.zero_grad()\n outputs = self(inputs)\n loss = self.criterion(outputs.reshape(-1), labels)\n loss.backward()\n self.optimizer.step()\n running_loss += loss.item()\n if j % 200 == 199:\n print('[%d, %5d] loss: %.3f' % (epoch + 1, j + 1, running_loss / 200))\n running_loss = 0.0\n self.scheduler.step()", "testset = utils.TensorDataset(X_test, y_test)\ntestloader = utils.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)\ncorrect = 0\ntotal = 0\nwith torch.no_grad():\n for data in testloader:\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n outputs = self(inputs)\n predictions = torch.round(outputs).reshape(-1)\n total += labels.size(0)\n correct += (predictions == labels).sum().item()\nreturn correct / total"], "bodies_text": "<|body_start_0|>\n super(MLP, self).__init__()\n self.f = activation_function\n self.fc_first = nn.Linear(input_size, hidden_dim)\n self.layers = []\n for i in range(num_layers):\n self.layers.append(nn.Linear(hidden_dim, hidden_dim))\n self.fc_last = nn.Linear(hidden_dim, 1)\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.to(self.device)\n for i in range(len(self.layers)):\n self.layers[i].to(self.device)\n self.criterion = nn.BCELoss()\n self.optimizer = optim.SGD(self.parameters(), lr=learning_rate, momentum=momentum)\n self.scheduler = StepLR(self.optimizer, step_size=1, gamma=decay_factor)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.f(self.fc_first(x))\n for i in range(len(self.layers)):\n x = self.f(self.layers[i](x))\n x = torch.sigmoid(self.fc_last(x))\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n trainset = utils.TensorDataset(X_train, y_train)\n trainloader = utils.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)\n for epoch in range(epochs):\n print('epoch:', epoch, 'learning rate:', self.scheduler.get_lr())\n running_loss = 0.0\n for j, data in enumerate(trainloader, 0):\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n self.optimizer.zero_grad()\n outputs = self(inputs)\n loss = self.criterion(outputs.reshape(-1), labels)\n loss.backward()\n self.optimizer.step()\n running_loss += loss.item()\n if j % 200 == 199:\n print('[%d, %5d] loss: %.3f' % (epoch + 1, j + 1, running_loss / 200))\n running_loss = 0.0\n self.scheduler.step()\n<|end_body_2|>\n\n<|body_start_3|>\n testset = utils.TensorDataset(X_test, y_test)\n testloader = utils.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n outputs = self(inputs)\n predictions = torch.round(outputs).reshape(-1)\n total += labels.size(0)\n correct += (predictions == labels).sum().item()\n return correct / total\n<|end_body_3|>\n", "class_docstring": "A multilayer perceptron based on the starter tutorial at pytorch: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py", "class_name": "MLP", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass MLP:\n \"\"\"A multilayer perceptron based on the starter tutorial at pytorch: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py\"\"\"\n\n def __init__(self, input_size, hidden_dim, num_layers=1, activation_function=torch.relu, learning_rate=0.001, decay_factor=0.5, momentum=0.9):\n \"\"\"A simple multi layer network for binary classification :param input_size: the dimensionality of the feature vectors to be input :param hidden_dim: the number of neurons used in each layers :param num_layers: number of layers in the network :param activation_function: the nonlinearity used after each layer except the last :param learning_rate: coefficient for gradient decent :param decay_factor: coefficient for scheduled learning_rate decrease per epoch :param momentum: momentum coefficient for gradient decent\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"A forward pass of the network to classify a given tweet :param x: a feature vector representing a tweet :return: a 1d tensor of predicted classes (between 1 for informative, 0 for not)\"\"\"\n <|body_1|>\n\n def learn(self, X_train, y_train, epochs=3):\n \"\"\"Train the network on a labeled dataset :param X_train: a tensor of features :param y_train: a tensor of labels :param epochs: an int, number of epochs to learn for\"\"\"\n <|body_2|>\n\n def get_accuracy(self, X_test, y_test):\n \"\"\"Get the accuracy of the model on some test set :param X_test: a tensor of features :param y_test: a tensor of labels :return: a float, the accuracy (number of correct predictions out of total)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MLP, self).__init__()\n self.f = activation_function\n self.fc_first = nn.Linear(input_size, hidden_dim)\n self.layers = []\n for i in range(num_layers):\n self.layers.append(nn.Linear(hidden_dim, hidden_dim))\n self.fc_last = nn.Linear(hidden_dim, 1)\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.to(self.device)\n for i in range(len(self.layers)):\n self.layers[i].to(self.device)\n self.criterion = nn.BCELoss()\n self.optimizer = optim.SGD(self.parameters(), lr=learning_rate, momentum=momentum)\n self.scheduler = StepLR(self.optimizer, step_size=1, gamma=decay_factor)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.f(self.fc_first(x))\n for i in range(len(self.layers)):\n x = self.f(self.layers[i](x))\n x = torch.sigmoid(self.fc_last(x))\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n trainset = utils.TensorDataset(X_train, y_train)\n trainloader = utils.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)\n for epoch in range(epochs):\n print('epoch:', epoch, 'learning rate:', self.scheduler.get_lr())\n running_loss = 0.0\n for j, data in enumerate(trainloader, 0):\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n self.optimizer.zero_grad()\n outputs = self(inputs)\n loss = self.criterion(outputs.reshape(-1), labels)\n loss.backward()\n self.optimizer.step()\n running_loss += loss.item()\n if j % 200 == 199:\n print('[%d, %5d] loss: %.3f' % (epoch + 1, j + 1, running_loss / 200))\n running_loss = 0.0\n self.scheduler.step()\n<|end_body_2|>\n\n<|body_start_3|>\n testset = utils.TensorDataset(X_test, y_test)\n testloader = utils.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n outputs = self(inputs)\n predictions = torch.round(outputs).reshape(-1)\n total += labels.size(0)\n correct += (predictions == labels).sum().item()\n return correct / total\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000494", "length_bytes": 5050, "license_type": "no_license", "methods": [{"docstring": "A simple multi layer network for binary classification :param input_size: the dimensionality of the feature vectors to be input :param hidden_dim: the number of neurons used in each layers :param num_layers: number of layers in the network :param activation_function: the nonlinearity used after each layer except the last :param learning_rate: coefficient for gradient decent :param decay_factor: coefficient for scheduled learning_rate decrease per epoch :param momentum: momentum coefficient for gradient decent", "name": "__init__", "signature": "def __init__(self, input_size, hidden_dim, num_layers=1, activation_function=torch.relu, learning_rate=0.001, decay_factor=0.5, momentum=0.9)"}, {"docstring": "A forward pass of the network to classify a given tweet :param x: a feature vector representing a tweet :return: a 1d tensor of predicted classes (between 1 for informative, 0 for not)", "name": "forward", "signature": "def forward(self, x)"}, {"docstring": "Train the network on a labeled dataset :param X_train: a tensor of features :param y_train: a tensor of labels :param epochs: an int, number of epochs to learn for", "name": "learn", "signature": "def learn(self, X_train, y_train, epochs=3)"}, {"docstring": "Get the accuracy of the model on some test set :param X_test: a tensor of features :param y_test: a tensor of labels :return: a float, the accuracy (number of correct predictions out of total)", "name": "get_accuracy", "signature": "def get_accuracy(self, X_test, y_test)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_018289", "prompt": "Implement the Python class `MLP` described below.\n\nClass description:\nA multilayer perceptron based on the starter tutorial at pytorch: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, hidden_dim, num_layers=1, activation_function=torch.relu, learning_rate=0.001, decay_factor=0.5, momentum=0.9): A simple multi layer network for binary classification :param input_size: the dimensionality of the feature vectors to be input :param hidden_dim: the number of neurons used in each layers :param num_layers: number of layers in the network :param activation_function: the nonlinearity used after each layer except the last :param learning_rate: coefficient for gradient decent :param decay_factor: coefficient for scheduled learning_rate decrease per epoch :param momentum: momentum coefficient for gradient decent\n- def forward(self, x): A forward pass of the network to classify a given tweet :param x: a feature vector representing a tweet :return: a 1d tensor of predicted classes (between 1 for informative, 0 for not)\n- def learn(self, X_train, y_train, epochs=3): Train the network on a labeled dataset :param X_train: a tensor of features :param y_train: a tensor of labels :param epochs: an int, number of epochs to learn for\n- def get_accuracy(self, X_test, y_test): Get the accuracy of the model on some test set :param X_test: a tensor of features :param y_test: a tensor of labels :return: a float, the accuracy (number of correct predictions out of total)", "prompted_full_text": "Implement the Python class `MLP` described below.\n\nClass description:\nA multilayer perceptron based on the starter tutorial at pytorch: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py\n\nMethod signatures and docstrings:\n- def __init__(self, input_size, hidden_dim, num_layers=1, activation_function=torch.relu, learning_rate=0.001, decay_factor=0.5, momentum=0.9): A simple multi layer network for binary classification :param input_size: the dimensionality of the feature vectors to be input :param hidden_dim: the number of neurons used in each layers :param num_layers: number of layers in the network :param activation_function: the nonlinearity used after each layer except the last :param learning_rate: coefficient for gradient decent :param decay_factor: coefficient for scheduled learning_rate decrease per epoch :param momentum: momentum coefficient for gradient decent\n- def forward(self, x): A forward pass of the network to classify a given tweet :param x: a feature vector representing a tweet :return: a 1d tensor of predicted classes (between 1 for informative, 0 for not)\n- def learn(self, X_train, y_train, epochs=3): Train the network on a labeled dataset :param X_train: a tensor of features :param y_train: a tensor of labels :param epochs: an int, number of epochs to learn for\n- def get_accuracy(self, X_test, y_test): Get the accuracy of the model on some test set :param X_test: a tensor of features :param y_test: a tensor of labels :return: a float, the accuracy (number of correct predictions out of total)\n\n<|skeleton|>\nclass MLP:\n \"\"\"A multilayer perceptron based on the starter tutorial at pytorch: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py\"\"\"\n\n def __init__(self, input_size, hidden_dim, num_layers=1, activation_function=torch.relu, learning_rate=0.001, decay_factor=0.5, momentum=0.9):\n \"\"\"A simple multi layer network for binary classification :param input_size: the dimensionality of the feature vectors to be input :param hidden_dim: the number of neurons used in each layers :param num_layers: number of layers in the network :param activation_function: the nonlinearity used after each layer except the last :param learning_rate: coefficient for gradient decent :param decay_factor: coefficient for scheduled learning_rate decrease per epoch :param momentum: momentum coefficient for gradient decent\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"A forward pass of the network to classify a given tweet :param x: a feature vector representing a tweet :return: a 1d tensor of predicted classes (between 1 for informative, 0 for not)\"\"\"\n <|body_1|>\n\n def learn(self, X_train, y_train, epochs=3):\n \"\"\"Train the network on a labeled dataset :param X_train: a tensor of features :param y_train: a tensor of labels :param epochs: an int, number of epochs to learn for\"\"\"\n <|body_2|>\n\n def get_accuracy(self, X_test, y_test):\n \"\"\"Get the accuracy of the model on some test set :param X_test: a tensor of features :param y_test: a tensor of labels :return: a float, the accuracy (number of correct predictions out of total)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n super(MLP, self).__init__()\n self.f = activation_function\n self.fc_first = nn.Linear(input_size, hidden_dim)\n self.layers = []\n for i in range(num_layers):\n self.layers.append(nn.Linear(hidden_dim, hidden_dim))\n self.fc_last = nn.Linear(hidden_dim, 1)\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.to(self.device)\n for i in range(len(self.layers)):\n self.layers[i].to(self.device)\n self.criterion = nn.BCELoss()\n self.optimizer = optim.SGD(self.parameters(), lr=learning_rate, momentum=momentum)\n self.scheduler = StepLR(self.optimizer, step_size=1, gamma=decay_factor)\n<|end_body_0|>\n\n<|body_start_1|>\n x = self.f(self.fc_first(x))\n for i in range(len(self.layers)):\n x = self.f(self.layers[i](x))\n x = torch.sigmoid(self.fc_last(x))\n return x\n<|end_body_1|>\n\n<|body_start_2|>\n trainset = utils.TensorDataset(X_train, y_train)\n trainloader = utils.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)\n for epoch in range(epochs):\n print('epoch:', epoch, 'learning rate:', self.scheduler.get_lr())\n running_loss = 0.0\n for j, data in enumerate(trainloader, 0):\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n self.optimizer.zero_grad()\n outputs = self(inputs)\n loss = self.criterion(outputs.reshape(-1), labels)\n loss.backward()\n self.optimizer.step()\n running_loss += loss.item()\n if j % 200 == 199:\n print('[%d, %5d] loss: %.3f' % (epoch + 1, j + 1, running_loss / 200))\n running_loss = 0.0\n self.scheduler.step()\n<|end_body_2|>\n\n<|body_start_3|>\n testset = utils.TensorDataset(X_test, y_test)\n testloader = utils.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n outputs = self(inputs)\n predictions = torch.round(outputs).reshape(-1)\n total += labels.size(0)\n correct += (predictions == labels).sum().item()\n return correct / total\n<|end_body_3|>\n", "revision_id": "a8fc124199571f19cdf41325fdcf49ee3a5cb649", "skeleton": "<|skeleton|>\nclass MLP:\n \"\"\"A multilayer perceptron based on the starter tutorial at pytorch: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py\"\"\"\n\n def __init__(self, input_size, hidden_dim, num_layers=1, activation_function=torch.relu, learning_rate=0.001, decay_factor=0.5, momentum=0.9):\n \"\"\"A simple multi layer network for binary classification :param input_size: the dimensionality of the feature vectors to be input :param hidden_dim: the number of neurons used in each layers :param num_layers: number of layers in the network :param activation_function: the nonlinearity used after each layer except the last :param learning_rate: coefficient for gradient decent :param decay_factor: coefficient for scheduled learning_rate decrease per epoch :param momentum: momentum coefficient for gradient decent\"\"\"\n <|body_0|>\n\n def forward(self, x):\n \"\"\"A forward pass of the network to classify a given tweet :param x: a feature vector representing a tweet :return: a 1d tensor of predicted classes (between 1 for informative, 0 for not)\"\"\"\n <|body_1|>\n\n def learn(self, X_train, y_train, epochs=3):\n \"\"\"Train the network on a labeled dataset :param X_train: a tensor of features :param y_train: a tensor of labels :param epochs: an int, number of epochs to learn for\"\"\"\n <|body_2|>\n\n def get_accuracy(self, X_test, y_test):\n \"\"\"Get the accuracy of the model on some test set :param X_test: a tensor of features :param y_test: a tensor of labels :return: a float, the accuracy (number of correct predictions out of total)\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class MLP:\n \"\"\"A multilayer perceptron based on the starter tutorial at pytorch: https://pytorch.org/tutorials/beginner/blitz/cifar10_tutorial.html#sphx-glr-beginner-blitz-cifar10-tutorial-py\"\"\"\n\n def __init__(self, input_size, hidden_dim, num_layers=1, activation_function=torch.relu, learning_rate=0.001, decay_factor=0.5, momentum=0.9):\n \"\"\"A simple multi layer network for binary classification :param input_size: the dimensionality of the feature vectors to be input :param hidden_dim: the number of neurons used in each layers :param num_layers: number of layers in the network :param activation_function: the nonlinearity used after each layer except the last :param learning_rate: coefficient for gradient decent :param decay_factor: coefficient for scheduled learning_rate decrease per epoch :param momentum: momentum coefficient for gradient decent\"\"\"\n super(MLP, self).__init__()\n self.f = activation_function\n self.fc_first = nn.Linear(input_size, hidden_dim)\n self.layers = []\n for i in range(num_layers):\n self.layers.append(nn.Linear(hidden_dim, hidden_dim))\n self.fc_last = nn.Linear(hidden_dim, 1)\n self.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n self.to(self.device)\n for i in range(len(self.layers)):\n self.layers[i].to(self.device)\n self.criterion = nn.BCELoss()\n self.optimizer = optim.SGD(self.parameters(), lr=learning_rate, momentum=momentum)\n self.scheduler = StepLR(self.optimizer, step_size=1, gamma=decay_factor)\n\n def forward(self, x):\n \"\"\"A forward pass of the network to classify a given tweet :param x: a feature vector representing a tweet :return: a 1d tensor of predicted classes (between 1 for informative, 0 for not)\"\"\"\n x = self.f(self.fc_first(x))\n for i in range(len(self.layers)):\n x = self.f(self.layers[i](x))\n x = torch.sigmoid(self.fc_last(x))\n return x\n\n def learn(self, X_train, y_train, epochs=3):\n \"\"\"Train the network on a labeled dataset :param X_train: a tensor of features :param y_train: a tensor of labels :param epochs: an int, number of epochs to learn for\"\"\"\n trainset = utils.TensorDataset(X_train, y_train)\n trainloader = utils.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2)\n for epoch in range(epochs):\n print('epoch:', epoch, 'learning rate:', self.scheduler.get_lr())\n running_loss = 0.0\n for j, data in enumerate(trainloader, 0):\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n self.optimizer.zero_grad()\n outputs = self(inputs)\n loss = self.criterion(outputs.reshape(-1), labels)\n loss.backward()\n self.optimizer.step()\n running_loss += loss.item()\n if j % 200 == 199:\n print('[%d, %5d] loss: %.3f' % (epoch + 1, j + 1, running_loss / 200))\n running_loss = 0.0\n self.scheduler.step()\n\n def get_accuracy(self, X_test, y_test):\n \"\"\"Get the accuracy of the model on some test set :param X_test: a tensor of features :param y_test: a tensor of labels :return: a float, the accuracy (number of correct predictions out of total)\"\"\"\n testset = utils.TensorDataset(X_test, y_test)\n testloader = utils.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2)\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n inputs, labels = (data[0].to(self.device), data[1].to(self.device))\n outputs = self(inputs)\n predictions = torch.round(outputs).reshape(-1)\n total += labels.size(0)\n correct += (predictions == labels).sum().item()\n return correct / total\n", "source": "the_stack_v2_python_sparse", "source_path": "models/baseline_mlp.py", "source_repo": "Midhunadharshini/IITUDND", "split": "test", "star_events_count": 0} {"blob_id": "32a4e3304ba9c207642f37aad96e1f4fdf2ee3ec", "bodies": ["self.jwt_secret = jwt_secret\nself.jwt_issuer = jwt_issuer\nself.jwt_audiences = jwt_audiences\nself.default_lifespan = default_lifespan\nreturn", "if lifespan is None:\n lifespan = self.default_lifespan\nprint('TokenAgent.create_for_user: ' + str(user.to_jdata()))\ntoken_payload = create_user_payload_now(user, self.jwt_issuer, self.jwt_audiences, lifespan)\nprint('TokenAgent created payload: ' + str(token_payload.data))\ntoken = token_payload.to_jwt_token(self.jwt_secret)\nprint('TokenAgent created token: ' + str(token))\nreturn token", "nest_user = NestUser(nest_id, username, given_name, family_name, thumb_url=thumb_url)\ntoken = self.create_for_user(nest_user, lifespan=lifespan)\nreturn token", "audience = self.jwt_audiences[0]\ntkn_payload = decode_token(token, self.jwt_secret, audience, self.jwt_issuer)\nreturn tkn_payload"], "bodies_text": "<|body_start_0|>\n self.jwt_secret = jwt_secret\n self.jwt_issuer = jwt_issuer\n self.jwt_audiences = jwt_audiences\n self.default_lifespan = default_lifespan\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if lifespan is None:\n lifespan = self.default_lifespan\n print('TokenAgent.create_for_user: ' + str(user.to_jdata()))\n token_payload = create_user_payload_now(user, self.jwt_issuer, self.jwt_audiences, lifespan)\n print('TokenAgent created payload: ' + str(token_payload.data))\n token = token_payload.to_jwt_token(self.jwt_secret)\n print('TokenAgent created token: ' + str(token))\n return token\n<|end_body_1|>\n\n<|body_start_2|>\n nest_user = NestUser(nest_id, username, given_name, family_name, thumb_url=thumb_url)\n token = self.create_for_user(nest_user, lifespan=lifespan)\n return token\n<|end_body_2|>\n\n<|body_start_3|>\n audience = self.jwt_audiences[0]\n tkn_payload = decode_token(token, self.jwt_secret, audience, self.jwt_issuer)\n return tkn_payload\n<|end_body_3|>\n", "class_docstring": "Class that creates JWT tokens for users.", "class_name": "TokenAgent", "detected_licenses": ["MIT"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass TokenAgent:\n \"\"\"Class that creates JWT tokens for users.\"\"\"\n\n def __init__(self, jwt_secret, jwt_issuer, jwt_audiences, default_lifespan=None):\n \"\"\"Initializes self. Args: jwt_secret (str): The symmetric secret for signing tokens. jwt_issuer (str): The issuer string to use (\"who issued this token?\"). jwt_audiences (List[str]): The audiences list to use (\"who should accept this token?\"). default_lifespan(datetime.timedelta): if no lifespan is passed to the create* methods, this will be used. Returns: None: None.\"\"\"\n <|body_0|>\n\n def create_for_user(self, user, lifespan=None):\n \"\"\"Creates a token for a user. Args: user (nest_py.flask.accounts.user.NestUser): The user. lifespan (datetime.timedelta): How long the token should be valid. Or None to use the default_lifespan\"\"\"\n <|body_1|>\n\n def create_for_system_operation_as_user(self, nest_id, lifespan=None, given_name='Nest', family_name='System', thumb_url='', username='nestsysteminternal'):\n \"\"\"Creates a token for a system operation performed on a user's behalf. Args: nest_id(NestId): The user's id. lifespan (Optional(datetime.timedelta)): How long the token should be valid. given_name (Optional[str]): A given name to use on the token. family_name (Optional[str]): A family name to use on the token. thumb_url (Optional[str]): A thumbnail URL to use on the token. username (Optional[str]): A username to use on the token.\"\"\"\n <|body_2|>\n\n def decode(self, token):\n \"\"\"token(str): A jwt token, generated by a compatible TokenAgent returns a TokenPayload. Returns None if the token could not be decoded. Will return a TokenPayload if it is expired.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.jwt_secret = jwt_secret\n self.jwt_issuer = jwt_issuer\n self.jwt_audiences = jwt_audiences\n self.default_lifespan = default_lifespan\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if lifespan is None:\n lifespan = self.default_lifespan\n print('TokenAgent.create_for_user: ' + str(user.to_jdata()))\n token_payload = create_user_payload_now(user, self.jwt_issuer, self.jwt_audiences, lifespan)\n print('TokenAgent created payload: ' + str(token_payload.data))\n token = token_payload.to_jwt_token(self.jwt_secret)\n print('TokenAgent created token: ' + str(token))\n return token\n<|end_body_1|>\n\n<|body_start_2|>\n nest_user = NestUser(nest_id, username, given_name, family_name, thumb_url=thumb_url)\n token = self.create_for_user(nest_user, lifespan=lifespan)\n return token\n<|end_body_2|>\n\n<|body_start_3|>\n audience = self.jwt_audiences[0]\n tkn_payload = decode_token(token, self.jwt_secret, audience, self.jwt_issuer)\n return tkn_payload\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000495", "length_bytes": 6662, "license_type": "permissive", "methods": [{"docstring": "Initializes self. Args: jwt_secret (str): The symmetric secret for signing tokens. jwt_issuer (str): The issuer string to use (\"who issued this token?\"). jwt_audiences (List[str]): The audiences list to use (\"who should accept this token?\"). default_lifespan(datetime.timedelta): if no lifespan is passed to the create* methods, this will be used. Returns: None: None.", "name": "__init__", "signature": "def __init__(self, jwt_secret, jwt_issuer, jwt_audiences, default_lifespan=None)"}, {"docstring": "Creates a token for a user. Args: user (nest_py.flask.accounts.user.NestUser): The user. lifespan (datetime.timedelta): How long the token should be valid. Or None to use the default_lifespan", "name": "create_for_user", "signature": "def create_for_user(self, user, lifespan=None)"}, {"docstring": "Creates a token for a system operation performed on a user's behalf. Args: nest_id(NestId): The user's id. lifespan (Optional(datetime.timedelta)): How long the token should be valid. given_name (Optional[str]): A given name to use on the token. family_name (Optional[str]): A family name to use on the token. thumb_url (Optional[str]): A thumbnail URL to use on the token. username (Optional[str]): A username to use on the token.", "name": "create_for_system_operation_as_user", "signature": "def create_for_system_operation_as_user(self, nest_id, lifespan=None, given_name='Nest', family_name='System', thumb_url='', username='nestsysteminternal')"}, {"docstring": "token(str): A jwt token, generated by a compatible TokenAgent returns a TokenPayload. Returns None if the token could not be decoded. Will return a TokenPayload if it is expired.", "name": "decode", "signature": "def decode(self, token)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_029256", "prompt": "Implement the Python class `TokenAgent` described below.\n\nClass description:\nClass that creates JWT tokens for users.\n\nMethod signatures and docstrings:\n- def __init__(self, jwt_secret, jwt_issuer, jwt_audiences, default_lifespan=None): Initializes self. Args: jwt_secret (str): The symmetric secret for signing tokens. jwt_issuer (str): The issuer string to use (\"who issued this token?\"). jwt_audiences (List[str]): The audiences list to use (\"who should accept this token?\"). default_lifespan(datetime.timedelta): if no lifespan is passed to the create* methods, this will be used. Returns: None: None.\n- def create_for_user(self, user, lifespan=None): Creates a token for a user. Args: user (nest_py.flask.accounts.user.NestUser): The user. lifespan (datetime.timedelta): How long the token should be valid. Or None to use the default_lifespan\n- def create_for_system_operation_as_user(self, nest_id, lifespan=None, given_name='Nest', family_name='System', thumb_url='', username='nestsysteminternal'): Creates a token for a system operation performed on a user's behalf. Args: nest_id(NestId): The user's id. lifespan (Optional(datetime.timedelta)): How long the token should be valid. given_name (Optional[str]): A given name to use on the token. family_name (Optional[str]): A family name to use on the token. thumb_url (Optional[str]): A thumbnail URL to use on the token. username (Optional[str]): A username to use on the token.\n- def decode(self, token): token(str): A jwt token, generated by a compatible TokenAgent returns a TokenPayload. Returns None if the token could not be decoded. Will return a TokenPayload if it is expired.", "prompted_full_text": "Implement the Python class `TokenAgent` described below.\n\nClass description:\nClass that creates JWT tokens for users.\n\nMethod signatures and docstrings:\n- def __init__(self, jwt_secret, jwt_issuer, jwt_audiences, default_lifespan=None): Initializes self. Args: jwt_secret (str): The symmetric secret for signing tokens. jwt_issuer (str): The issuer string to use (\"who issued this token?\"). jwt_audiences (List[str]): The audiences list to use (\"who should accept this token?\"). default_lifespan(datetime.timedelta): if no lifespan is passed to the create* methods, this will be used. Returns: None: None.\n- def create_for_user(self, user, lifespan=None): Creates a token for a user. Args: user (nest_py.flask.accounts.user.NestUser): The user. lifespan (datetime.timedelta): How long the token should be valid. Or None to use the default_lifespan\n- def create_for_system_operation_as_user(self, nest_id, lifespan=None, given_name='Nest', family_name='System', thumb_url='', username='nestsysteminternal'): Creates a token for a system operation performed on a user's behalf. Args: nest_id(NestId): The user's id. lifespan (Optional(datetime.timedelta)): How long the token should be valid. given_name (Optional[str]): A given name to use on the token. family_name (Optional[str]): A family name to use on the token. thumb_url (Optional[str]): A thumbnail URL to use on the token. username (Optional[str]): A username to use on the token.\n- def decode(self, token): token(str): A jwt token, generated by a compatible TokenAgent returns a TokenPayload. Returns None if the token could not be decoded. Will return a TokenPayload if it is expired.\n\n<|skeleton|>\nclass TokenAgent:\n \"\"\"Class that creates JWT tokens for users.\"\"\"\n\n def __init__(self, jwt_secret, jwt_issuer, jwt_audiences, default_lifespan=None):\n \"\"\"Initializes self. Args: jwt_secret (str): The symmetric secret for signing tokens. jwt_issuer (str): The issuer string to use (\"who issued this token?\"). jwt_audiences (List[str]): The audiences list to use (\"who should accept this token?\"). default_lifespan(datetime.timedelta): if no lifespan is passed to the create* methods, this will be used. Returns: None: None.\"\"\"\n <|body_0|>\n\n def create_for_user(self, user, lifespan=None):\n \"\"\"Creates a token for a user. Args: user (nest_py.flask.accounts.user.NestUser): The user. lifespan (datetime.timedelta): How long the token should be valid. Or None to use the default_lifespan\"\"\"\n <|body_1|>\n\n def create_for_system_operation_as_user(self, nest_id, lifespan=None, given_name='Nest', family_name='System', thumb_url='', username='nestsysteminternal'):\n \"\"\"Creates a token for a system operation performed on a user's behalf. Args: nest_id(NestId): The user's id. lifespan (Optional(datetime.timedelta)): How long the token should be valid. given_name (Optional[str]): A given name to use on the token. family_name (Optional[str]): A family name to use on the token. thumb_url (Optional[str]): A thumbnail URL to use on the token. username (Optional[str]): A username to use on the token.\"\"\"\n <|body_2|>\n\n def decode(self, token):\n \"\"\"token(str): A jwt token, generated by a compatible TokenAgent returns a TokenPayload. Returns None if the token could not be decoded. Will return a TokenPayload if it is expired.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n self.jwt_secret = jwt_secret\n self.jwt_issuer = jwt_issuer\n self.jwt_audiences = jwt_audiences\n self.default_lifespan = default_lifespan\n return\n<|end_body_0|>\n\n<|body_start_1|>\n if lifespan is None:\n lifespan = self.default_lifespan\n print('TokenAgent.create_for_user: ' + str(user.to_jdata()))\n token_payload = create_user_payload_now(user, self.jwt_issuer, self.jwt_audiences, lifespan)\n print('TokenAgent created payload: ' + str(token_payload.data))\n token = token_payload.to_jwt_token(self.jwt_secret)\n print('TokenAgent created token: ' + str(token))\n return token\n<|end_body_1|>\n\n<|body_start_2|>\n nest_user = NestUser(nest_id, username, given_name, family_name, thumb_url=thumb_url)\n token = self.create_for_user(nest_user, lifespan=lifespan)\n return token\n<|end_body_2|>\n\n<|body_start_3|>\n audience = self.jwt_audiences[0]\n tkn_payload = decode_token(token, self.jwt_secret, audience, self.jwt_issuer)\n return tkn_payload\n<|end_body_3|>\n", "revision_id": "947af72a51c99096ddcce3c3c71bef8a144a17bb", "skeleton": "<|skeleton|>\nclass TokenAgent:\n \"\"\"Class that creates JWT tokens for users.\"\"\"\n\n def __init__(self, jwt_secret, jwt_issuer, jwt_audiences, default_lifespan=None):\n \"\"\"Initializes self. Args: jwt_secret (str): The symmetric secret for signing tokens. jwt_issuer (str): The issuer string to use (\"who issued this token?\"). jwt_audiences (List[str]): The audiences list to use (\"who should accept this token?\"). default_lifespan(datetime.timedelta): if no lifespan is passed to the create* methods, this will be used. Returns: None: None.\"\"\"\n <|body_0|>\n\n def create_for_user(self, user, lifespan=None):\n \"\"\"Creates a token for a user. Args: user (nest_py.flask.accounts.user.NestUser): The user. lifespan (datetime.timedelta): How long the token should be valid. Or None to use the default_lifespan\"\"\"\n <|body_1|>\n\n def create_for_system_operation_as_user(self, nest_id, lifespan=None, given_name='Nest', family_name='System', thumb_url='', username='nestsysteminternal'):\n \"\"\"Creates a token for a system operation performed on a user's behalf. Args: nest_id(NestId): The user's id. lifespan (Optional(datetime.timedelta)): How long the token should be valid. given_name (Optional[str]): A given name to use on the token. family_name (Optional[str]): A family name to use on the token. thumb_url (Optional[str]): A thumbnail URL to use on the token. username (Optional[str]): A username to use on the token.\"\"\"\n <|body_2|>\n\n def decode(self, token):\n \"\"\"token(str): A jwt token, generated by a compatible TokenAgent returns a TokenPayload. Returns None if the token could not be decoded. Will return a TokenPayload if it is expired.\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class TokenAgent:\n \"\"\"Class that creates JWT tokens for users.\"\"\"\n\n def __init__(self, jwt_secret, jwt_issuer, jwt_audiences, default_lifespan=None):\n \"\"\"Initializes self. Args: jwt_secret (str): The symmetric secret for signing tokens. jwt_issuer (str): The issuer string to use (\"who issued this token?\"). jwt_audiences (List[str]): The audiences list to use (\"who should accept this token?\"). default_lifespan(datetime.timedelta): if no lifespan is passed to the create* methods, this will be used. Returns: None: None.\"\"\"\n self.jwt_secret = jwt_secret\n self.jwt_issuer = jwt_issuer\n self.jwt_audiences = jwt_audiences\n self.default_lifespan = default_lifespan\n return\n\n def create_for_user(self, user, lifespan=None):\n \"\"\"Creates a token for a user. Args: user (nest_py.flask.accounts.user.NestUser): The user. lifespan (datetime.timedelta): How long the token should be valid. Or None to use the default_lifespan\"\"\"\n if lifespan is None:\n lifespan = self.default_lifespan\n print('TokenAgent.create_for_user: ' + str(user.to_jdata()))\n token_payload = create_user_payload_now(user, self.jwt_issuer, self.jwt_audiences, lifespan)\n print('TokenAgent created payload: ' + str(token_payload.data))\n token = token_payload.to_jwt_token(self.jwt_secret)\n print('TokenAgent created token: ' + str(token))\n return token\n\n def create_for_system_operation_as_user(self, nest_id, lifespan=None, given_name='Nest', family_name='System', thumb_url='', username='nestsysteminternal'):\n \"\"\"Creates a token for a system operation performed on a user's behalf. Args: nest_id(NestId): The user's id. lifespan (Optional(datetime.timedelta)): How long the token should be valid. given_name (Optional[str]): A given name to use on the token. family_name (Optional[str]): A family name to use on the token. thumb_url (Optional[str]): A thumbnail URL to use on the token. username (Optional[str]): A username to use on the token.\"\"\"\n nest_user = NestUser(nest_id, username, given_name, family_name, thumb_url=thumb_url)\n token = self.create_for_user(nest_user, lifespan=lifespan)\n return token\n\n def decode(self, token):\n \"\"\"token(str): A jwt token, generated by a compatible TokenAgent returns a TokenPayload. Returns None if the token could not be decoded. Will return a TokenPayload if it is expired.\"\"\"\n audience = self.jwt_audiences[0]\n tkn_payload = decode_token(token, self.jwt_secret, audience, self.jwt_issuer)\n return tkn_payload\n", "source": "the_stack_v2_python_sparse", "source_path": "nest_py/core/flask/accounts/token.py", "source_repo": "bodom0015/platform", "split": "test", "star_events_count": 0} {"blob_id": "b326dd6203a32f70a9af10a28eebbca263c6a335", "bodies": ["AnomalyEngine = app.config['AnomalyEngine']\nsimulation = AnomalyEngine.lookup_simulation(interface)\nif simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\nif simulation.is_running():\n return (simulation.status(), 201)\nelse:\n return ({'message': 'No simulation running'}, 200)", "AnomalyEngine = app.config['AnomalyEngine']\ninterface_simulation = AnomalyEngine.lookup_simulation(interface)\nargs = simulation_parser.parse_args()\nparams = {key: value for key, value in args.items() if value}\nif interface_simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\nlogging.info('Using interface ' + interface + ' for the WAN simulation')\nlogging.info('Using the following simulation params: ' + json.dumps(params))\nif AnomalyEngine.simulation_running(interface_simulation):\n logging.info('There is already a simulation running on interface ' + interface + '. Trying to add rule.')\n if not interface_simulation.network_conflict(params):\n error = AnomalyEngine.start_simulation(interface_simulation, params, add_rule=True)\n if error:\n api.abort(500, 'Error during start of simulation: {}'.format(error))\n else:\n api.abort(406, 'There is already a running simulation using the network you specified')\n return (interface_simulation.status(), 201)\nAnomalyEngine.start_simulation(interface_simulation, params)\nreturn (interface_simulation.status(), 200)", "AnomalyEngine = app.config['AnomalyEngine']\nsimulation = AnomalyEngine.lookup_simulation(interface)\nargs = simulation_parser.parse_args()\nparams = {key: value for key, value in args.items() if value}\nif simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\nif not simulation.is_running():\n api.abort(405, 'There is no simulation running on interface {}'.format(interface))\nlogging.info('Updating WAN simulation on ' + interface)\nlogging.info('Using the following simulation params: ' + json.dumps(params))\nerror = AnomalyEngine.start_simulation(simulation, params, update=True)\nif error:\n api.abort(500, 'Error during execution: {}'.format(error))\nreturn (simulation.status(), 201)", "AnomalyEngine = app.config['AnomalyEngine']\nsimulation = AnomalyEngine.lookup_simulation(interface)\nif simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\nif simulation.is_running():\n error = simulation.stop()\n if error:\n api.abort(500, 'Error during stop operation: {}'.format(error))\nelse:\n api.abort(404, 'There is no simulation running on interface {}'.format(interface))"], "bodies_text": "<|body_start_0|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n return (simulation.status(), 201)\n else:\n return ({'message': 'No simulation running'}, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n AnomalyEngine = app.config['AnomalyEngine']\n interface_simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if interface_simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n logging.info('Using interface ' + interface + ' for the WAN simulation')\n logging.info('Using the following simulation params: ' + json.dumps(params))\n if AnomalyEngine.simulation_running(interface_simulation):\n logging.info('There is already a simulation running on interface ' + interface + '. Trying to add rule.')\n if not interface_simulation.network_conflict(params):\n error = AnomalyEngine.start_simulation(interface_simulation, params, add_rule=True)\n if error:\n api.abort(500, 'Error during start of simulation: {}'.format(error))\n else:\n api.abort(406, 'There is already a running simulation using the network you specified')\n return (interface_simulation.status(), 201)\n AnomalyEngine.start_simulation(interface_simulation, params)\n return (interface_simulation.status(), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if not simulation.is_running():\n api.abort(405, 'There is no simulation running on interface {}'.format(interface))\n logging.info('Updating WAN simulation on ' + interface)\n logging.info('Using the following simulation params: ' + json.dumps(params))\n error = AnomalyEngine.start_simulation(simulation, params, update=True)\n if error:\n api.abort(500, 'Error during execution: {}'.format(error))\n return (simulation.status(), 201)\n<|end_body_2|>\n\n<|body_start_3|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n error = simulation.stop()\n if error:\n api.abort(500, 'Error during stop operation: {}'.format(error))\n else:\n api.abort(404, 'There is no simulation running on interface {}'.format(interface))\n<|end_body_3|>\n", "class_docstring": "", "class_name": "WAN", "detected_licenses": ["Apache-2.0"], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass WAN:\n\n def get(self, interface):\n \"\"\"Returns status of WAN simulation on the given interface.\"\"\"\n <|body_0|>\n\n def post(self, interface):\n \"\"\"Start WAN simulation on given interface or add rule to running WAN simulation.\"\"\"\n <|body_1|>\n\n def put(self, interface):\n \"\"\"Update a WAN simulation on given interface\"\"\"\n <|body_2|>\n\n def delete(self, interface):\n \"\"\"Stop WAN Simulation on given interface\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n return (simulation.status(), 201)\n else:\n return ({'message': 'No simulation running'}, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n AnomalyEngine = app.config['AnomalyEngine']\n interface_simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if interface_simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n logging.info('Using interface ' + interface + ' for the WAN simulation')\n logging.info('Using the following simulation params: ' + json.dumps(params))\n if AnomalyEngine.simulation_running(interface_simulation):\n logging.info('There is already a simulation running on interface ' + interface + '. Trying to add rule.')\n if not interface_simulation.network_conflict(params):\n error = AnomalyEngine.start_simulation(interface_simulation, params, add_rule=True)\n if error:\n api.abort(500, 'Error during start of simulation: {}'.format(error))\n else:\n api.abort(406, 'There is already a running simulation using the network you specified')\n return (interface_simulation.status(), 201)\n AnomalyEngine.start_simulation(interface_simulation, params)\n return (interface_simulation.status(), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if not simulation.is_running():\n api.abort(405, 'There is no simulation running on interface {}'.format(interface))\n logging.info('Updating WAN simulation on ' + interface)\n logging.info('Using the following simulation params: ' + json.dumps(params))\n error = AnomalyEngine.start_simulation(simulation, params, update=True)\n if error:\n api.abort(500, 'Error during execution: {}'.format(error))\n return (simulation.status(), 201)\n<|end_body_2|>\n\n<|body_start_3|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n error = simulation.stop()\n if error:\n api.abort(500, 'Error during stop operation: {}'.format(error))\n else:\n api.abort(404, 'There is no simulation running on interface {}'.format(interface))\n<|end_body_3|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000496", "length_bytes": 8057, "license_type": "permissive", "methods": [{"docstring": "Returns status of WAN simulation on the given interface.", "name": "get", "signature": "def get(self, interface)"}, {"docstring": "Start WAN simulation on given interface or add rule to running WAN simulation.", "name": "post", "signature": "def post(self, interface)"}, {"docstring": "Update a WAN simulation on given interface", "name": "put", "signature": "def put(self, interface)"}, {"docstring": "Stop WAN Simulation on given interface", "name": "delete", "signature": "def delete(self, interface)"}], "n_methods": 4, "original_id": "stack_v2_sparse_classes_30k_train_032098", "prompt": "Implement the Python class `WAN` described below.\n\nClass description:\nImplement the WAN class.\n\nMethod signatures and docstrings:\n- def get(self, interface): Returns status of WAN simulation on the given interface.\n- def post(self, interface): Start WAN simulation on given interface or add rule to running WAN simulation.\n- def put(self, interface): Update a WAN simulation on given interface\n- def delete(self, interface): Stop WAN Simulation on given interface", "prompted_full_text": "Implement the Python class `WAN` described below.\n\nClass description:\nImplement the WAN class.\n\nMethod signatures and docstrings:\n- def get(self, interface): Returns status of WAN simulation on the given interface.\n- def post(self, interface): Start WAN simulation on given interface or add rule to running WAN simulation.\n- def put(self, interface): Update a WAN simulation on given interface\n- def delete(self, interface): Stop WAN Simulation on given interface\n\n<|skeleton|>\nclass WAN:\n\n def get(self, interface):\n \"\"\"Returns status of WAN simulation on the given interface.\"\"\"\n <|body_0|>\n\n def post(self, interface):\n \"\"\"Start WAN simulation on given interface or add rule to running WAN simulation.\"\"\"\n <|body_1|>\n\n def put(self, interface):\n \"\"\"Update a WAN simulation on given interface\"\"\"\n <|body_2|>\n\n def delete(self, interface):\n \"\"\"Stop WAN Simulation on given interface\"\"\"\n <|body_3|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n return (simulation.status(), 201)\n else:\n return ({'message': 'No simulation running'}, 200)\n<|end_body_0|>\n\n<|body_start_1|>\n AnomalyEngine = app.config['AnomalyEngine']\n interface_simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if interface_simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n logging.info('Using interface ' + interface + ' for the WAN simulation')\n logging.info('Using the following simulation params: ' + json.dumps(params))\n if AnomalyEngine.simulation_running(interface_simulation):\n logging.info('There is already a simulation running on interface ' + interface + '. Trying to add rule.')\n if not interface_simulation.network_conflict(params):\n error = AnomalyEngine.start_simulation(interface_simulation, params, add_rule=True)\n if error:\n api.abort(500, 'Error during start of simulation: {}'.format(error))\n else:\n api.abort(406, 'There is already a running simulation using the network you specified')\n return (interface_simulation.status(), 201)\n AnomalyEngine.start_simulation(interface_simulation, params)\n return (interface_simulation.status(), 200)\n<|end_body_1|>\n\n<|body_start_2|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if not simulation.is_running():\n api.abort(405, 'There is no simulation running on interface {}'.format(interface))\n logging.info('Updating WAN simulation on ' + interface)\n logging.info('Using the following simulation params: ' + json.dumps(params))\n error = AnomalyEngine.start_simulation(simulation, params, update=True)\n if error:\n api.abort(500, 'Error during execution: {}'.format(error))\n return (simulation.status(), 201)\n<|end_body_2|>\n\n<|body_start_3|>\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n error = simulation.stop()\n if error:\n api.abort(500, 'Error during stop operation: {}'.format(error))\n else:\n api.abort(404, 'There is no simulation running on interface {}'.format(interface))\n<|end_body_3|>\n", "revision_id": "8be390e0bace6aa87fe60fa744e97408c40e7375", "skeleton": "<|skeleton|>\nclass WAN:\n\n def get(self, interface):\n \"\"\"Returns status of WAN simulation on the given interface.\"\"\"\n <|body_0|>\n\n def post(self, interface):\n \"\"\"Start WAN simulation on given interface or add rule to running WAN simulation.\"\"\"\n <|body_1|>\n\n def put(self, interface):\n \"\"\"Update a WAN simulation on given interface\"\"\"\n <|body_2|>\n\n def delete(self, interface):\n \"\"\"Stop WAN Simulation on given interface\"\"\"\n <|body_3|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class WAN:\n def get(self, interface):\n \"\"\"Returns status of WAN simulation on the given interface.\"\"\"\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n return (simulation.status(), 201)\n else:\n return ({'message': 'No simulation running'}, 200)\n\n def post(self, interface):\n \"\"\"Start WAN simulation on given interface or add rule to running WAN simulation.\"\"\"\n AnomalyEngine = app.config['AnomalyEngine']\n interface_simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if interface_simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n logging.info('Using interface ' + interface + ' for the WAN simulation')\n logging.info('Using the following simulation params: ' + json.dumps(params))\n if AnomalyEngine.simulation_running(interface_simulation):\n logging.info('There is already a simulation running on interface ' + interface + '. Trying to add rule.')\n if not interface_simulation.network_conflict(params):\n error = AnomalyEngine.start_simulation(interface_simulation, params, add_rule=True)\n if error:\n api.abort(500, 'Error during start of simulation: {}'.format(error))\n else:\n api.abort(406, 'There is already a running simulation using the network you specified')\n return (interface_simulation.status(), 201)\n AnomalyEngine.start_simulation(interface_simulation, params)\n return (interface_simulation.status(), 200)\n\n def put(self, interface):\n \"\"\"Update a WAN simulation on given interface\"\"\"\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n args = simulation_parser.parse_args()\n params = {key: value for key, value in args.items() if value}\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if not simulation.is_running():\n api.abort(405, 'There is no simulation running on interface {}'.format(interface))\n logging.info('Updating WAN simulation on ' + interface)\n logging.info('Using the following simulation params: ' + json.dumps(params))\n error = AnomalyEngine.start_simulation(simulation, params, update=True)\n if error:\n api.abort(500, 'Error during execution: {}'.format(error))\n return (simulation.status(), 201)\n\n def delete(self, interface):\n \"\"\"Stop WAN Simulation on given interface\"\"\"\n AnomalyEngine = app.config['AnomalyEngine']\n simulation = AnomalyEngine.lookup_simulation(interface)\n if simulation == None:\n logging.info(\"Can't find interface \" + interface)\n api.abort(400, \"Interface {} doesn't exist\".format(interface))\n if simulation.is_running():\n error = simulation.stop()\n if error:\n api.abort(500, 'Error during stop operation: {}'.format(error))\n else:\n api.abort(404, 'There is no simulation running on interface {}'.format(interface))\n", "source": "the_stack_v2_python_sparse", "source_path": "anomaly-injector-agent/api/endpoints/wan.py", "source_repo": "dos-group/distributed-anomaly-injection", "split": "test", "star_events_count": 0} {"blob_id": "df0435da5a001a50958fcb7ef22e23988c7a91db", "bodies": ["field_name = self.name\nfor record in records:\n context = record.env.context\n binary_value = record[field_name]\n field_object = record._fields[field_name]\n parent_field = field_object._attrs.get('resize_based_on')\n if parent_field and (not record.env.context.get('refresh_image_cache')):\n record[parent_field] = binary_value\n return\n else:\n width = field_object._attrs.get('height')\n height = field_object._attrs.get('width')\n if width or height:\n size = (height or 64, width or 64)\n binary_value = image_resize_image(binary_value, size)\n self._write_binary(record, field_name, binary_value)\n for _name, _field in record._fields.items():\n if not isinstance(_field, BinaryField):\n continue\n if _field._attrs.get('resize_based_on') == field_name:\n _field._refresh_cache(record)\nreturn", "field = record._fields[self.name]\nparent_field = field._attrs.get('resize_based_on')\n_logger.debug('Refreshing Image Cache from the field %s of object %s id : %s' % (field.name, record._name, record.id))\nctx = record.env.context.copy()\nctx['refresh_image_cache'] = True\nctx['bin_base64_%s' % field._attrs.get('resize_based_on')] = True\nresized_image = None\nif parent_field:\n original_binary = getattr(record, parent_field)\n size = (field._attrs.get('height'), field._attrs.get('width'))\n resized_image = image_resize_image(original_binary, size)\nrecord.with_context(**ctx)[field.name] = resized_image", "context = record.env.context\nfield_name = self.name\nif not context.get('bin_size_%s' % field_name) and (not context.get('bin_base64_%s' % field_name)) and storage.external_storage_server:\n if context.get('bin_size'):\n context.pop('bin_size')\n return storage.get_url(binary_uid)\nelse:\n return super(ImageField, self)._read_binary(storage, record, binary_uid)"], "bodies_text": "<|body_start_0|>\n field_name = self.name\n for record in records:\n context = record.env.context\n binary_value = record[field_name]\n field_object = record._fields[field_name]\n parent_field = field_object._attrs.get('resize_based_on')\n if parent_field and (not record.env.context.get('refresh_image_cache')):\n record[parent_field] = binary_value\n return\n else:\n width = field_object._attrs.get('height')\n height = field_object._attrs.get('width')\n if width or height:\n size = (height or 64, width or 64)\n binary_value = image_resize_image(binary_value, size)\n self._write_binary(record, field_name, binary_value)\n for _name, _field in record._fields.items():\n if not isinstance(_field, BinaryField):\n continue\n if _field._attrs.get('resize_based_on') == field_name:\n _field._refresh_cache(record)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n field = record._fields[self.name]\n parent_field = field._attrs.get('resize_based_on')\n _logger.debug('Refreshing Image Cache from the field %s of object %s id : %s' % (field.name, record._name, record.id))\n ctx = record.env.context.copy()\n ctx['refresh_image_cache'] = True\n ctx['bin_base64_%s' % field._attrs.get('resize_based_on')] = True\n resized_image = None\n if parent_field:\n original_binary = getattr(record, parent_field)\n size = (field._attrs.get('height'), field._attrs.get('width'))\n resized_image = image_resize_image(original_binary, size)\n record.with_context(**ctx)[field.name] = resized_image\n<|end_body_1|>\n\n<|body_start_2|>\n context = record.env.context\n field_name = self.name\n if not context.get('bin_size_%s' % field_name) and (not context.get('bin_base64_%s' % field_name)) and storage.external_storage_server:\n if context.get('bin_size'):\n context.pop('bin_size')\n return storage.get_url(binary_uid)\n else:\n return super(ImageField, self)._read_binary(storage, record, binary_uid)\n<|end_body_2|>\n", "class_docstring": "Class for all ImageField type The __init__ should take optional parameters as bellow :param {string} resize_based_on: name of the field that should be resized :param {integer} width: width of the image resized :param {integer} height: height of the image resized", "class_name": "ImageField", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass ImageField:\n \"\"\"Class for all ImageField type The __init__ should take optional parameters as bellow :param {string} resize_based_on: name of the field that should be resized :param {integer} width: width of the image resized :param {integer} height: height of the image resized\"\"\"\n\n def _compute_write(self, records):\n \"\"\"Control how a value of ImageField should be updated, when an ImageField field is updated should consider the case: - current field is parent field or child field: the parent field should be always update first, then update related child fields. :param {openerp.models.BaseModel} records: current working recordset\"\"\"\n <|body_0|>\n\n def _refresh_cache(self, record):\n \"\"\"Refresh the cache of the child field to resize again based on new binary value from parent field or totally remove all if parent field is not set. :params {openerp.models.BaseModel} record: current recordset (one) to update value for binary field\"\"\"\n <|body_1|>\n\n def _read_binary(self, storage, record, binary_uid):\n \"\"\"Internal method to read contents from binary field depending on storage configuration. :param {dict} storage: default storage configuration :param {openerp.models.BaseModel} record: current recordset to read data from :param {string} binary_uid: binary UID (file name) of binary file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n field_name = self.name\n for record in records:\n context = record.env.context\n binary_value = record[field_name]\n field_object = record._fields[field_name]\n parent_field = field_object._attrs.get('resize_based_on')\n if parent_field and (not record.env.context.get('refresh_image_cache')):\n record[parent_field] = binary_value\n return\n else:\n width = field_object._attrs.get('height')\n height = field_object._attrs.get('width')\n if width or height:\n size = (height or 64, width or 64)\n binary_value = image_resize_image(binary_value, size)\n self._write_binary(record, field_name, binary_value)\n for _name, _field in record._fields.items():\n if not isinstance(_field, BinaryField):\n continue\n if _field._attrs.get('resize_based_on') == field_name:\n _field._refresh_cache(record)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n field = record._fields[self.name]\n parent_field = field._attrs.get('resize_based_on')\n _logger.debug('Refreshing Image Cache from the field %s of object %s id : %s' % (field.name, record._name, record.id))\n ctx = record.env.context.copy()\n ctx['refresh_image_cache'] = True\n ctx['bin_base64_%s' % field._attrs.get('resize_based_on')] = True\n resized_image = None\n if parent_field:\n original_binary = getattr(record, parent_field)\n size = (field._attrs.get('height'), field._attrs.get('width'))\n resized_image = image_resize_image(original_binary, size)\n record.with_context(**ctx)[field.name] = resized_image\n<|end_body_1|>\n\n<|body_start_2|>\n context = record.env.context\n field_name = self.name\n if not context.get('bin_size_%s' % field_name) and (not context.get('bin_base64_%s' % field_name)) and storage.external_storage_server:\n if context.get('bin_size'):\n context.pop('bin_size')\n return storage.get_url(binary_uid)\n else:\n return super(ImageField, self)._read_binary(storage, record, binary_uid)\n<|end_body_2|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000497", "length_bytes": 13811, "license_type": "no_license", "methods": [{"docstring": "Control how a value of ImageField should be updated, when an ImageField field is updated should consider the case: - current field is parent field or child field: the parent field should be always update first, then update related child fields. :param {openerp.models.BaseModel} records: current working recordset", "name": "_compute_write", "signature": "def _compute_write(self, records)"}, {"docstring": "Refresh the cache of the child field to resize again based on new binary value from parent field or totally remove all if parent field is not set. :params {openerp.models.BaseModel} record: current recordset (one) to update value for binary field", "name": "_refresh_cache", "signature": "def _refresh_cache(self, record)"}, {"docstring": "Internal method to read contents from binary field depending on storage configuration. :param {dict} storage: default storage configuration :param {openerp.models.BaseModel} record: current recordset to read data from :param {string} binary_uid: binary UID (file name) of binary file", "name": "_read_binary", "signature": "def _read_binary(self, storage, record, binary_uid)"}], "n_methods": 3, "original_id": "stack_v2_sparse_classes_30k_train_036048", "prompt": "Implement the Python class `ImageField` described below.\n\nClass description:\nClass for all ImageField type The __init__ should take optional parameters as bellow :param {string} resize_based_on: name of the field that should be resized :param {integer} width: width of the image resized :param {integer} height: height of the image resized\n\nMethod signatures and docstrings:\n- def _compute_write(self, records): Control how a value of ImageField should be updated, when an ImageField field is updated should consider the case: - current field is parent field or child field: the parent field should be always update first, then update related child fields. :param {openerp.models.BaseModel} records: current working recordset\n- def _refresh_cache(self, record): Refresh the cache of the child field to resize again based on new binary value from parent field or totally remove all if parent field is not set. :params {openerp.models.BaseModel} record: current recordset (one) to update value for binary field\n- def _read_binary(self, storage, record, binary_uid): Internal method to read contents from binary field depending on storage configuration. :param {dict} storage: default storage configuration :param {openerp.models.BaseModel} record: current recordset to read data from :param {string} binary_uid: binary UID (file name) of binary file", "prompted_full_text": "Implement the Python class `ImageField` described below.\n\nClass description:\nClass for all ImageField type The __init__ should take optional parameters as bellow :param {string} resize_based_on: name of the field that should be resized :param {integer} width: width of the image resized :param {integer} height: height of the image resized\n\nMethod signatures and docstrings:\n- def _compute_write(self, records): Control how a value of ImageField should be updated, when an ImageField field is updated should consider the case: - current field is parent field or child field: the parent field should be always update first, then update related child fields. :param {openerp.models.BaseModel} records: current working recordset\n- def _refresh_cache(self, record): Refresh the cache of the child field to resize again based on new binary value from parent field or totally remove all if parent field is not set. :params {openerp.models.BaseModel} record: current recordset (one) to update value for binary field\n- def _read_binary(self, storage, record, binary_uid): Internal method to read contents from binary field depending on storage configuration. :param {dict} storage: default storage configuration :param {openerp.models.BaseModel} record: current recordset to read data from :param {string} binary_uid: binary UID (file name) of binary file\n\n<|skeleton|>\nclass ImageField:\n \"\"\"Class for all ImageField type The __init__ should take optional parameters as bellow :param {string} resize_based_on: name of the field that should be resized :param {integer} width: width of the image resized :param {integer} height: height of the image resized\"\"\"\n\n def _compute_write(self, records):\n \"\"\"Control how a value of ImageField should be updated, when an ImageField field is updated should consider the case: - current field is parent field or child field: the parent field should be always update first, then update related child fields. :param {openerp.models.BaseModel} records: current working recordset\"\"\"\n <|body_0|>\n\n def _refresh_cache(self, record):\n \"\"\"Refresh the cache of the child field to resize again based on new binary value from parent field or totally remove all if parent field is not set. :params {openerp.models.BaseModel} record: current recordset (one) to update value for binary field\"\"\"\n <|body_1|>\n\n def _read_binary(self, storage, record, binary_uid):\n \"\"\"Internal method to read contents from binary field depending on storage configuration. :param {dict} storage: default storage configuration :param {openerp.models.BaseModel} record: current recordset to read data from :param {string} binary_uid: binary UID (file name) of binary file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n field_name = self.name\n for record in records:\n context = record.env.context\n binary_value = record[field_name]\n field_object = record._fields[field_name]\n parent_field = field_object._attrs.get('resize_based_on')\n if parent_field and (not record.env.context.get('refresh_image_cache')):\n record[parent_field] = binary_value\n return\n else:\n width = field_object._attrs.get('height')\n height = field_object._attrs.get('width')\n if width or height:\n size = (height or 64, width or 64)\n binary_value = image_resize_image(binary_value, size)\n self._write_binary(record, field_name, binary_value)\n for _name, _field in record._fields.items():\n if not isinstance(_field, BinaryField):\n continue\n if _field._attrs.get('resize_based_on') == field_name:\n _field._refresh_cache(record)\n return\n<|end_body_0|>\n\n<|body_start_1|>\n field = record._fields[self.name]\n parent_field = field._attrs.get('resize_based_on')\n _logger.debug('Refreshing Image Cache from the field %s of object %s id : %s' % (field.name, record._name, record.id))\n ctx = record.env.context.copy()\n ctx['refresh_image_cache'] = True\n ctx['bin_base64_%s' % field._attrs.get('resize_based_on')] = True\n resized_image = None\n if parent_field:\n original_binary = getattr(record, parent_field)\n size = (field._attrs.get('height'), field._attrs.get('width'))\n resized_image = image_resize_image(original_binary, size)\n record.with_context(**ctx)[field.name] = resized_image\n<|end_body_1|>\n\n<|body_start_2|>\n context = record.env.context\n field_name = self.name\n if not context.get('bin_size_%s' % field_name) and (not context.get('bin_base64_%s' % field_name)) and storage.external_storage_server:\n if context.get('bin_size'):\n context.pop('bin_size')\n return storage.get_url(binary_uid)\n else:\n return super(ImageField, self)._read_binary(storage, record, binary_uid)\n<|end_body_2|>\n", "revision_id": "673dd0f2a7c0b69a984342b20f55164a97a00529", "skeleton": "<|skeleton|>\nclass ImageField:\n \"\"\"Class for all ImageField type The __init__ should take optional parameters as bellow :param {string} resize_based_on: name of the field that should be resized :param {integer} width: width of the image resized :param {integer} height: height of the image resized\"\"\"\n\n def _compute_write(self, records):\n \"\"\"Control how a value of ImageField should be updated, when an ImageField field is updated should consider the case: - current field is parent field or child field: the parent field should be always update first, then update related child fields. :param {openerp.models.BaseModel} records: current working recordset\"\"\"\n <|body_0|>\n\n def _refresh_cache(self, record):\n \"\"\"Refresh the cache of the child field to resize again based on new binary value from parent field or totally remove all if parent field is not set. :params {openerp.models.BaseModel} record: current recordset (one) to update value for binary field\"\"\"\n <|body_1|>\n\n def _read_binary(self, storage, record, binary_uid):\n \"\"\"Internal method to read contents from binary field depending on storage configuration. :param {dict} storage: default storage configuration :param {openerp.models.BaseModel} record: current recordset to read data from :param {string} binary_uid: binary UID (file name) of binary file\"\"\"\n <|body_2|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class ImageField:\n \"\"\"Class for all ImageField type The __init__ should take optional parameters as bellow :param {string} resize_based_on: name of the field that should be resized :param {integer} width: width of the image resized :param {integer} height: height of the image resized\"\"\"\n\n def _compute_write(self, records):\n \"\"\"Control how a value of ImageField should be updated, when an ImageField field is updated should consider the case: - current field is parent field or child field: the parent field should be always update first, then update related child fields. :param {openerp.models.BaseModel} records: current working recordset\"\"\"\n field_name = self.name\n for record in records:\n context = record.env.context\n binary_value = record[field_name]\n field_object = record._fields[field_name]\n parent_field = field_object._attrs.get('resize_based_on')\n if parent_field and (not record.env.context.get('refresh_image_cache')):\n record[parent_field] = binary_value\n return\n else:\n width = field_object._attrs.get('height')\n height = field_object._attrs.get('width')\n if width or height:\n size = (height or 64, width or 64)\n binary_value = image_resize_image(binary_value, size)\n self._write_binary(record, field_name, binary_value)\n for _name, _field in record._fields.items():\n if not isinstance(_field, BinaryField):\n continue\n if _field._attrs.get('resize_based_on') == field_name:\n _field._refresh_cache(record)\n return\n\n def _refresh_cache(self, record):\n \"\"\"Refresh the cache of the child field to resize again based on new binary value from parent field or totally remove all if parent field is not set. :params {openerp.models.BaseModel} record: current recordset (one) to update value for binary field\"\"\"\n field = record._fields[self.name]\n parent_field = field._attrs.get('resize_based_on')\n _logger.debug('Refreshing Image Cache from the field %s of object %s id : %s' % (field.name, record._name, record.id))\n ctx = record.env.context.copy()\n ctx['refresh_image_cache'] = True\n ctx['bin_base64_%s' % field._attrs.get('resize_based_on')] = True\n resized_image = None\n if parent_field:\n original_binary = getattr(record, parent_field)\n size = (field._attrs.get('height'), field._attrs.get('width'))\n resized_image = image_resize_image(original_binary, size)\n record.with_context(**ctx)[field.name] = resized_image\n\n def _read_binary(self, storage, record, binary_uid):\n \"\"\"Internal method to read contents from binary field depending on storage configuration. :param {dict} storage: default storage configuration :param {openerp.models.BaseModel} record: current recordset to read data from :param {string} binary_uid: binary UID (file name) of binary file\"\"\"\n context = record.env.context\n field_name = self.name\n if not context.get('bin_size_%s' % field_name) and (not context.get('bin_base64_%s' % field_name)) and storage.external_storage_server:\n if context.get('bin_size'):\n context.pop('bin_size')\n return storage.get_url(binary_uid)\n else:\n return super(ImageField, self)._read_binary(storage, record, binary_uid)\n", "source": "the_stack_v2_python_sparse", "source_path": "addons/trobz-extra/binary_field/fields.py", "source_repo": "TinPlusIT05/tms", "split": "test", "star_events_count": 0} {"blob_id": "fe13420446b89d6d86399e9f1086d0ac6bcfa080", "bodies": ["packagestr = request.data\nif packagestr['path'][-1] != '/':\n packagestr['path'] = packagestr['path'] + '/'\npackage_dir = settings.PACKAGE_DIR\nif package_dir[-1] == '/':\n package_dir = package_dir[:-1]\npackagestr['path'] = package_dir + packagestr['path']\ncmd = 'rpm -ivh ' + packagestr['path'] + packagestr['filename']\ncode, resultMSG = commands.getstatusoutput(cmd)\nif code != 0:\n return_error = {'errormsg': resultMSG, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\nMSG = {'message': resultMSG}\nreturn_dict = {'result': MSG}\nreturn Response(return_dict, status=status.HTTP_200_OK)", "packagestr = request.data\nif packagestr['forcedel'] is not True:\n cmd = 'rpm -e ' + packagestr['package']\nelse:\n cmd = 'rpm -e ' + packagestr['package'] + ' --nodeps'\ncode, result = commands.getstatusoutput(cmd)\nif code != 0:\n return_error = {'errormsg': result, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\nMSGlist = []\nMSGlist.append(packagestr['package'] + ':' + result)\nreturn Response(MSGlist, status=status.HTTP_200_OK)"], "bodies_text": "<|body_start_0|>\n packagestr = request.data\n if packagestr['path'][-1] != '/':\n packagestr['path'] = packagestr['path'] + '/'\n package_dir = settings.PACKAGE_DIR\n if package_dir[-1] == '/':\n package_dir = package_dir[:-1]\n packagestr['path'] = package_dir + packagestr['path']\n cmd = 'rpm -ivh ' + packagestr['path'] + packagestr['filename']\n code, resultMSG = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': resultMSG, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSG = {'message': resultMSG}\n return_dict = {'result': MSG}\n return Response(return_dict, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n packagestr = request.data\n if packagestr['forcedel'] is not True:\n cmd = 'rpm -e ' + packagestr['package']\n else:\n cmd = 'rpm -e ' + packagestr['package'] + ' --nodeps'\n code, result = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': result, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSGlist = []\n MSGlist.append(packagestr['package'] + ':' + result)\n return Response(MSGlist, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "class_docstring": "install packages,uninstall packages ['post', 'delete']", "class_name": "packages", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass packages:\n \"\"\"install packages,uninstall packages ['post', 'delete']\"\"\"\n\n def post(self, request, format=None):\n \"\"\"install packages\"\"\"\n <|body_0|>\n\n def delete(self, request, format=None):\n \"\"\"uninstall packages\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n packagestr = request.data\n if packagestr['path'][-1] != '/':\n packagestr['path'] = packagestr['path'] + '/'\n package_dir = settings.PACKAGE_DIR\n if package_dir[-1] == '/':\n package_dir = package_dir[:-1]\n packagestr['path'] = package_dir + packagestr['path']\n cmd = 'rpm -ivh ' + packagestr['path'] + packagestr['filename']\n code, resultMSG = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': resultMSG, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSG = {'message': resultMSG}\n return_dict = {'result': MSG}\n return Response(return_dict, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n packagestr = request.data\n if packagestr['forcedel'] is not True:\n cmd = 'rpm -e ' + packagestr['package']\n else:\n cmd = 'rpm -e ' + packagestr['package'] + ' --nodeps'\n code, result = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': result, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSGlist = []\n MSGlist.append(packagestr['package'] + ':' + result)\n return Response(MSGlist, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000498", "length_bytes": 7576, "license_type": "no_license", "methods": [{"docstring": "install packages", "name": "post", "signature": "def post(self, request, format=None)"}, {"docstring": "uninstall packages", "name": "delete", "signature": "def delete(self, request, format=None)"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_003278", "prompt": "Implement the Python class `packages` described below.\n\nClass description:\ninstall packages,uninstall packages ['post', 'delete']\n\nMethod signatures and docstrings:\n- def post(self, request, format=None): install packages\n- def delete(self, request, format=None): uninstall packages", "prompted_full_text": "Implement the Python class `packages` described below.\n\nClass description:\ninstall packages,uninstall packages ['post', 'delete']\n\nMethod signatures and docstrings:\n- def post(self, request, format=None): install packages\n- def delete(self, request, format=None): uninstall packages\n\n<|skeleton|>\nclass packages:\n \"\"\"install packages,uninstall packages ['post', 'delete']\"\"\"\n\n def post(self, request, format=None):\n \"\"\"install packages\"\"\"\n <|body_0|>\n\n def delete(self, request, format=None):\n \"\"\"uninstall packages\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n packagestr = request.data\n if packagestr['path'][-1] != '/':\n packagestr['path'] = packagestr['path'] + '/'\n package_dir = settings.PACKAGE_DIR\n if package_dir[-1] == '/':\n package_dir = package_dir[:-1]\n packagestr['path'] = package_dir + packagestr['path']\n cmd = 'rpm -ivh ' + packagestr['path'] + packagestr['filename']\n code, resultMSG = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': resultMSG, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSG = {'message': resultMSG}\n return_dict = {'result': MSG}\n return Response(return_dict, status=status.HTTP_200_OK)\n<|end_body_0|>\n\n<|body_start_1|>\n packagestr = request.data\n if packagestr['forcedel'] is not True:\n cmd = 'rpm -e ' + packagestr['package']\n else:\n cmd = 'rpm -e ' + packagestr['package'] + ' --nodeps'\n code, result = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': result, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSGlist = []\n MSGlist.append(packagestr['package'] + ':' + result)\n return Response(MSGlist, status=status.HTTP_200_OK)\n<|end_body_1|>\n", "revision_id": "7f801a569a396a27371d0831752595877c224a6b", "skeleton": "<|skeleton|>\nclass packages:\n \"\"\"install packages,uninstall packages ['post', 'delete']\"\"\"\n\n def post(self, request, format=None):\n \"\"\"install packages\"\"\"\n <|body_0|>\n\n def delete(self, request, format=None):\n \"\"\"uninstall packages\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class packages:\n \"\"\"install packages,uninstall packages ['post', 'delete']\"\"\"\n\n def post(self, request, format=None):\n \"\"\"install packages\"\"\"\n packagestr = request.data\n if packagestr['path'][-1] != '/':\n packagestr['path'] = packagestr['path'] + '/'\n package_dir = settings.PACKAGE_DIR\n if package_dir[-1] == '/':\n package_dir = package_dir[:-1]\n packagestr['path'] = package_dir + packagestr['path']\n cmd = 'rpm -ivh ' + packagestr['path'] + packagestr['filename']\n code, resultMSG = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': resultMSG, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSG = {'message': resultMSG}\n return_dict = {'result': MSG}\n return Response(return_dict, status=status.HTTP_200_OK)\n\n def delete(self, request, format=None):\n \"\"\"uninstall packages\"\"\"\n packagestr = request.data\n if packagestr['forcedel'] is not True:\n cmd = 'rpm -e ' + packagestr['package']\n else:\n cmd = 'rpm -e ' + packagestr['package'] + ' --nodeps'\n code, result = commands.getstatusoutput(cmd)\n if code != 0:\n return_error = {'errormsg': result, 'errorcode': code}\n return Response(return_error, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n MSGlist = []\n MSGlist.append(packagestr['package'] + ':' + result)\n return Response(MSGlist, status=status.HTTP_200_OK)\n", "source": "the_stack_v2_python_sparse", "source_path": "Python_projects/flask_projects/unicorn_project/packages/views.py", "source_repo": "sdtimothy8/Coding", "split": "test", "star_events_count": 0} {"blob_id": "763a4b843a0bb7607b1de9488be8bf2415897f08", "bodies": ["def dfs(node, depth):\n left = depth\n if node.left:\n left = dfs(node.left, depth + 1)\n if left < 0:\n return left\n right = depth\n if node.right:\n right = dfs(node.right, depth + 1)\n if right < 0:\n return right\n if left - right > 1 or right - left > 1:\n return -1\n return max(left, right)\nif not root:\n return True\nres = dfs(root, 0)\nreturn res >= 0", "def height(root: TreeNode) -> int:\n if not root:\n return 0\n leftHeight = height(root.left)\n rightHeight = height(root.right)\n if leftHeight == -1 or rightHeight == -1 or abs(leftHeight - rightHeight) > 1:\n return -1\n else:\n return max(leftHeight, rightHeight) + 1\nreturn height(root) >= 0"], "bodies_text": "<|body_start_0|>\n def dfs(node, depth):\n left = depth\n if node.left:\n left = dfs(node.left, depth + 1)\n if left < 0:\n return left\n right = depth\n if node.right:\n right = dfs(node.right, depth + 1)\n if right < 0:\n return right\n if left - right > 1 or right - left > 1:\n return -1\n return max(left, right)\n if not root:\n return True\n res = dfs(root, 0)\n return res >= 0\n<|end_body_0|>\n\n<|body_start_1|>\n def height(root: TreeNode) -> int:\n if not root:\n return 0\n leftHeight = height(root.left)\n rightHeight = height(root.right)\n if leftHeight == -1 or rightHeight == -1 or abs(leftHeight - rightHeight) > 1:\n return -1\n else:\n return max(leftHeight, rightHeight) + 1\n return height(root) >= 0\n<|end_body_1|>\n", "class_docstring": "", "class_name": "Solution", "detected_licenses": [], "format_version": "stack_v2_sparse_python_classes_v1", "full_text": "<|skeleton|>\nclass Solution:\n\n def isBalanced1(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 76 ms , 在所有 Python3 提交中击败了 31.32% 的用户 内存消耗: 19.9 MB , 在所有 Python3 提交中击败了 6.97% 的用户\"\"\"\n <|body_0|>\n\n def isBalanced(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 60 ms , 在所有 Python3 提交中击败了 72.75% 的用户 内存消耗: 19.6 MB , 在所有 Python3 提交中击败了 36.65% 的用\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(node, depth):\n left = depth\n if node.left:\n left = dfs(node.left, depth + 1)\n if left < 0:\n return left\n right = depth\n if node.right:\n right = dfs(node.right, depth + 1)\n if right < 0:\n return right\n if left - right > 1 or right - left > 1:\n return -1\n return max(left, right)\n if not root:\n return True\n res = dfs(root, 0)\n return res >= 0\n<|end_body_0|>\n\n<|body_start_1|>\n def height(root: TreeNode) -> int:\n if not root:\n return 0\n leftHeight = height(root.left)\n rightHeight = height(root.right)\n if leftHeight == -1 or rightHeight == -1 or abs(leftHeight - rightHeight) > 1:\n return -1\n else:\n return max(leftHeight, rightHeight) + 1\n return height(root) >= 0\n<|end_body_1|>\n", "id": "stack_v2_sparse_classes_75kplus_test_000499", "length_bytes": 2368, "license_type": "no_license", "methods": [{"docstring": "执行用时: 76 ms , 在所有 Python3 提交中击败了 31.32% 的用户 内存消耗: 19.9 MB , 在所有 Python3 提交中击败了 6.97% 的用户", "name": "isBalanced1", "signature": "def isBalanced1(self, root: TreeNode) -> bool"}, {"docstring": "执行用时: 60 ms , 在所有 Python3 提交中击败了 72.75% 的用户 内存消耗: 19.6 MB , 在所有 Python3 提交中击败了 36.65% 的用", "name": "isBalanced", "signature": "def isBalanced(self, root: TreeNode) -> bool"}], "n_methods": 2, "original_id": "stack_v2_sparse_classes_30k_train_014592", "prompt": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isBalanced1(self, root: TreeNode) -> bool: 执行用时: 76 ms , 在所有 Python3 提交中击败了 31.32% 的用户 内存消耗: 19.9 MB , 在所有 Python3 提交中击败了 6.97% 的用户\n- def isBalanced(self, root: TreeNode) -> bool: 执行用时: 60 ms , 在所有 Python3 提交中击败了 72.75% 的用户 内存消耗: 19.6 MB , 在所有 Python3 提交中击败了 36.65% 的用", "prompted_full_text": "Implement the Python class `Solution` described below.\n\nClass description:\nImplement the Solution class.\n\nMethod signatures and docstrings:\n- def isBalanced1(self, root: TreeNode) -> bool: 执行用时: 76 ms , 在所有 Python3 提交中击败了 31.32% 的用户 内存消耗: 19.9 MB , 在所有 Python3 提交中击败了 6.97% 的用户\n- def isBalanced(self, root: TreeNode) -> bool: 执行用时: 60 ms , 在所有 Python3 提交中击败了 72.75% 的用户 内存消耗: 19.6 MB , 在所有 Python3 提交中击败了 36.65% 的用\n\n<|skeleton|>\nclass Solution:\n\n def isBalanced1(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 76 ms , 在所有 Python3 提交中击败了 31.32% 的用户 内存消耗: 19.9 MB , 在所有 Python3 提交中击败了 6.97% 的用户\"\"\"\n <|body_0|>\n\n def isBalanced(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 60 ms , 在所有 Python3 提交中击败了 72.75% 的用户 内存消耗: 19.6 MB , 在所有 Python3 提交中击败了 36.65% 的用\"\"\"\n <|body_1|>\n\n<|end_skeleton|>\n\n<|body_start_0|>\n def dfs(node, depth):\n left = depth\n if node.left:\n left = dfs(node.left, depth + 1)\n if left < 0:\n return left\n right = depth\n if node.right:\n right = dfs(node.right, depth + 1)\n if right < 0:\n return right\n if left - right > 1 or right - left > 1:\n return -1\n return max(left, right)\n if not root:\n return True\n res = dfs(root, 0)\n return res >= 0\n<|end_body_0|>\n\n<|body_start_1|>\n def height(root: TreeNode) -> int:\n if not root:\n return 0\n leftHeight = height(root.left)\n rightHeight = height(root.right)\n if leftHeight == -1 or rightHeight == -1 or abs(leftHeight - rightHeight) > 1:\n return -1\n else:\n return max(leftHeight, rightHeight) + 1\n return height(root) >= 0\n<|end_body_1|>\n", "revision_id": "d613ed8a5a2c15ace7d513965b372d128845d66a", "skeleton": "<|skeleton|>\nclass Solution:\n\n def isBalanced1(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 76 ms , 在所有 Python3 提交中击败了 31.32% 的用户 内存消耗: 19.9 MB , 在所有 Python3 提交中击败了 6.97% 的用户\"\"\"\n <|body_0|>\n\n def isBalanced(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 60 ms , 在所有 Python3 提交中击败了 72.75% 的用户 内存消耗: 19.6 MB , 在所有 Python3 提交中击败了 36.65% 的用\"\"\"\n <|body_1|>\n\n<|end_skeleton|>", "snapshot_name": "stack_v2_sparse_classes_75kplus", "snapshot_source_dir": "data/stack_v2_sparse_classes_30k", "snapshot_total_rows": 75829, "solution": "class Solution:\n def isBalanced1(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 76 ms , 在所有 Python3 提交中击败了 31.32% 的用户 内存消耗: 19.9 MB , 在所有 Python3 提交中击败了 6.97% 的用户\"\"\"\n def dfs(node, depth):\n left = depth\n if node.left:\n left = dfs(node.left, depth + 1)\n if left < 0:\n return left\n right = depth\n if node.right:\n right = dfs(node.right, depth + 1)\n if right < 0:\n return right\n if left - right > 1 or right - left > 1:\n return -1\n return max(left, right)\n if not root:\n return True\n res = dfs(root, 0)\n return res >= 0\n\n def isBalanced(self, root: TreeNode) -> bool:\n \"\"\"执行用时: 60 ms , 在所有 Python3 提交中击败了 72.75% 的用户 内存消耗: 19.6 MB , 在所有 Python3 提交中击败了 36.65% 的用\"\"\"\n def height(root: TreeNode) -> int:\n if not root:\n return 0\n leftHeight = height(root.left)\n rightHeight = height(root.right)\n if leftHeight == -1 or rightHeight == -1 or abs(leftHeight - rightHeight) > 1:\n return -1\n else:\n return max(leftHeight, rightHeight) + 1\n return height(root) >= 0\n", "source": "the_stack_v2_python_sparse", "source_path": "平衡二叉树.py", "source_repo": "nomboy/leetcode", "split": "test", "star_events_count": 0}